1/***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at https://curl.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 * SPDX-License-Identifier: curl
22 *
23 ***************************************************************************/
24
25#include "curl_setup.h"
26#include "strtoofft.h"
27
28#ifdef HAVE_NETINET_IN_H
29#include <netinet/in.h>
30#endif
31#ifdef HAVE_NETDB_H
32#include <netdb.h>
33#endif
34#ifdef HAVE_ARPA_INET_H
35#include <arpa/inet.h>
36#endif
37#ifdef HAVE_NET_IF_H
38#include <net/if.h>
39#endif
40#ifdef HAVE_SYS_IOCTL_H
41#include <sys/ioctl.h>
42#endif
43#include <signal.h>
44
45#ifdef HAVE_SYS_PARAM_H
46#include <sys/param.h>
47#endif
48
49#ifdef HAVE_SYS_SELECT_H
50#include <sys/select.h>
51#elif defined(HAVE_UNISTD_H)
52#include <unistd.h>
53#endif
54
55#ifndef HAVE_SOCKET
56#error "We can't compile without socket() support!"
57#endif
58
59#include "urldata.h"
60#include <curl/curl.h>
61#include "netrc.h"
62
63#include "content_encoding.h"
64#include "hostip.h"
65#include "cfilters.h"
66#include "transfer.h"
67#include "sendf.h"
68#include "speedcheck.h"
69#include "progress.h"
70#include "http.h"
71#include "url.h"
72#include "getinfo.h"
73#include "vtls/vtls.h"
74#include "vquic/vquic.h"
75#include "select.h"
76#include "multiif.h"
77#include "connect.h"
78#include "http2.h"
79#include "mime.h"
80#include "strcase.h"
81#include "urlapi-int.h"
82#include "hsts.h"
83#include "setopt.h"
84#include "headers.h"
85
86/* The last 3 #include files should be in this order */
87#include "curl_printf.h"
88#include "curl_memory.h"
89#include "memdebug.h"
90
91#if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
92 !defined(CURL_DISABLE_IMAP)
93/*
94 * checkheaders() checks the linked list of custom headers for a
95 * particular header (prefix). Provide the prefix without colon!
96 *
97 * Returns a pointer to the first matching header or NULL if none matched.
98 */
99char *Curl_checkheaders(const struct Curl_easy *data,
100 const char *thisheader,
101 const size_t thislen)
102{
103 struct curl_slist *head;
104 DEBUGASSERT(thislen);
105 DEBUGASSERT(thisheader[thislen-1] != ':');
106
107 for(head = data->set.headers; head; head = head->next) {
108 if(strncasecompare(head->data, thisheader, thislen) &&
109 Curl_headersep(head->data[thislen]) )
110 return head->data;
111 }
112
113 return NULL;
114}
115#endif
116
117CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
118{
119 if(!data->state.ulbuf) {
120 data->state.ulbuf = malloc(data->set.upload_buffer_size);
121 if(!data->state.ulbuf)
122 return CURLE_OUT_OF_MEMORY;
123 }
124 return CURLE_OK;
125}
126
127#ifndef CURL_DISABLE_HTTP
128/*
129 * This function will be called to loop through the trailers buffer
130 * until no more data is available for sending.
131 */
132static size_t trailers_read(char *buffer, size_t size, size_t nitems,
133 void *raw)
134{
135 struct Curl_easy *data = (struct Curl_easy *)raw;
136 struct dynbuf *trailers_buf = &data->state.trailers_buf;
137 size_t bytes_left = Curl_dyn_len(s: trailers_buf) -
138 data->state.trailers_bytes_sent;
139 size_t to_copy = (size*nitems < bytes_left) ? size*nitems : bytes_left;
140 if(to_copy) {
141 memcpy(dest: buffer,
142 src: Curl_dyn_ptr(s: trailers_buf) + data->state.trailers_bytes_sent,
143 n: to_copy);
144 data->state.trailers_bytes_sent += to_copy;
145 }
146 return to_copy;
147}
148
149static size_t trailers_left(void *raw)
150{
151 struct Curl_easy *data = (struct Curl_easy *)raw;
152 struct dynbuf *trailers_buf = &data->state.trailers_buf;
153 return Curl_dyn_len(s: trailers_buf) - data->state.trailers_bytes_sent;
154}
155#endif
156
157/*
158 * This function will call the read callback to fill our buffer with data
159 * to upload.
160 */
161CURLcode Curl_fillreadbuffer(struct Curl_easy *data, size_t bytes,
162 size_t *nreadp)
163{
164 size_t buffersize = bytes;
165 size_t nread;
166
167 curl_read_callback readfunc = NULL;
168 void *extra_data = NULL;
169
170#ifndef CURL_DISABLE_HTTP
171 if(data->state.trailers_state == TRAILERS_INITIALIZED) {
172 struct curl_slist *trailers = NULL;
173 CURLcode result;
174 int trailers_ret_code;
175
176 /* at this point we already verified that the callback exists
177 so we compile and store the trailers buffer, then proceed */
178 infof(data,
179 "Moving trailers state machine from initialized to sending.");
180 data->state.trailers_state = TRAILERS_SENDING;
181 Curl_dyn_init(s: &data->state.trailers_buf, DYN_TRAILERS);
182
183 data->state.trailers_bytes_sent = 0;
184 Curl_set_in_callback(data, true);
185 trailers_ret_code = data->set.trailer_callback(&trailers,
186 data->set.trailer_data);
187 Curl_set_in_callback(data, false);
188 if(trailers_ret_code == CURL_TRAILERFUNC_OK) {
189 result = Curl_http_compile_trailers(trailers, buf: &data->state.trailers_buf,
190 handle: data);
191 }
192 else {
193 failf(data, fmt: "operation aborted by trailing headers callback");
194 *nreadp = 0;
195 result = CURLE_ABORTED_BY_CALLBACK;
196 }
197 if(result) {
198 Curl_dyn_free(s: &data->state.trailers_buf);
199 curl_slist_free_all(list: trailers);
200 return result;
201 }
202 infof(data, "Successfully compiled trailers.");
203 curl_slist_free_all(list: trailers);
204 }
205#endif
206
207#ifndef CURL_DISABLE_HTTP
208 /* if we are transmitting trailing data, we don't need to write
209 a chunk size so we skip this */
210 if(data->req.upload_chunky &&
211 data->state.trailers_state == TRAILERS_NONE) {
212 /* if chunked Transfer-Encoding */
213 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
214 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
215 }
216
217 if(data->state.trailers_state == TRAILERS_SENDING) {
218 /* if we're here then that means that we already sent the last empty chunk
219 but we didn't send a final CR LF, so we sent 0 CR LF. We then start
220 pulling trailing data until we have no more at which point we
221 simply return to the previous point in the state machine as if
222 nothing happened.
223 */
224 readfunc = trailers_read;
225 extra_data = (void *)data;
226 }
227 else
228#endif
229 {
230 readfunc = data->state.fread_func;
231 extra_data = data->state.in;
232 }
233
234 Curl_set_in_callback(data, true);
235 nread = readfunc(data->req.upload_fromhere, 1,
236 buffersize, extra_data);
237 Curl_set_in_callback(data, false);
238
239 if(nread == CURL_READFUNC_ABORT) {
240 failf(data, fmt: "operation aborted by callback");
241 *nreadp = 0;
242 return CURLE_ABORTED_BY_CALLBACK;
243 }
244 if(nread == CURL_READFUNC_PAUSE) {
245 struct SingleRequest *k = &data->req;
246
247 if(data->conn->handler->flags & PROTOPT_NONETWORK) {
248 /* protocols that work without network cannot be paused. This is
249 actually only FILE:// just now, and it can't pause since the transfer
250 isn't done using the "normal" procedure. */
251 failf(data, fmt: "Read callback asked for PAUSE when not supported");
252 return CURLE_READ_ERROR;
253 }
254
255 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
256 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
257 if(data->req.upload_chunky) {
258 /* Back out the preallocation done above */
259 data->req.upload_fromhere -= (8 + 2);
260 }
261 *nreadp = 0;
262
263 return CURLE_OK; /* nothing was read */
264 }
265 else if(nread > buffersize) {
266 /* the read function returned a too large value */
267 *nreadp = 0;
268 failf(data, fmt: "read function returned funny value");
269 return CURLE_READ_ERROR;
270 }
271
272#ifndef CURL_DISABLE_HTTP
273 if(!data->req.forbidchunk && data->req.upload_chunky) {
274 /* if chunked Transfer-Encoding
275 * build chunk:
276 *
277 * <HEX SIZE> CRLF
278 * <DATA> CRLF
279 */
280 /* On non-ASCII platforms the <DATA> may or may not be
281 translated based on state.prefer_ascii while the protocol
282 portion must always be translated to the network encoding.
283 To further complicate matters, line end conversion might be
284 done later on, so we need to prevent CRLFs from becoming
285 CRCRLFs if that's the case. To do this we use bare LFs
286 here, knowing they'll become CRLFs later on.
287 */
288
289 bool added_crlf = FALSE;
290 int hexlen = 0;
291 const char *endofline_native;
292 const char *endofline_network;
293
294 if(
295#ifdef CURL_DO_LINEEND_CONV
296 (data->state.prefer_ascii) ||
297#endif
298 (data->set.crlf)) {
299 /* \n will become \r\n later on */
300 endofline_native = "\n";
301 endofline_network = "\x0a";
302 }
303 else {
304 endofline_native = "\r\n";
305 endofline_network = "\x0d\x0a";
306 }
307
308 /* if we're not handling trailing data, proceed as usual */
309 if(data->state.trailers_state != TRAILERS_SENDING) {
310 char hexbuffer[11] = "";
311 hexlen = msnprintf(buffer: hexbuffer, maxlength: sizeof(hexbuffer),
312 format: "%zx%s", nread, endofline_native);
313
314 /* move buffer pointer */
315 data->req.upload_fromhere -= hexlen;
316 nread += hexlen;
317
318 /* copy the prefix to the buffer, leaving out the NUL */
319 memcpy(dest: data->req.upload_fromhere, src: hexbuffer, n: hexlen);
320
321 /* always append ASCII CRLF to the data unless
322 we have a valid trailer callback */
323 if((nread-hexlen) == 0 &&
324 data->set.trailer_callback != NULL &&
325 data->state.trailers_state == TRAILERS_NONE) {
326 data->state.trailers_state = TRAILERS_INITIALIZED;
327 }
328 else {
329 memcpy(dest: data->req.upload_fromhere + nread,
330 src: endofline_network,
331 n: strlen(s: endofline_network));
332 added_crlf = TRUE;
333 }
334 }
335
336 if(data->state.trailers_state == TRAILERS_SENDING &&
337 !trailers_left(raw: data)) {
338 Curl_dyn_free(s: &data->state.trailers_buf);
339 data->state.trailers_state = TRAILERS_DONE;
340 data->set.trailer_data = NULL;
341 data->set.trailer_callback = NULL;
342 /* mark the transfer as done */
343 data->req.upload_done = TRUE;
344 infof(data, "Signaling end of chunked upload after trailers.");
345 }
346 else
347 if((nread - hexlen) == 0 &&
348 data->state.trailers_state != TRAILERS_INITIALIZED) {
349 /* mark this as done once this chunk is transferred */
350 data->req.upload_done = TRUE;
351 infof(data,
352 "Signaling end of chunked upload via terminating chunk.");
353 }
354
355 if(added_crlf)
356 nread += strlen(s: endofline_network); /* for the added end of line */
357 }
358#endif
359
360 *nreadp = nread;
361
362 return CURLE_OK;
363}
364
365static int data_pending(struct Curl_easy *data)
366{
367 struct connectdata *conn = data->conn;
368
369 if(conn->handler->protocol&PROTO_FAMILY_FTP)
370 return Curl_conn_data_pending(data, SECONDARYSOCKET);
371
372 /* in the case of libssh2, we can never be really sure that we have emptied
373 its internal buffers so we MUST always try until we get EAGAIN back */
374 return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
375 Curl_conn_data_pending(data, FIRSTSOCKET);
376}
377
378/*
379 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
380 * remote document with the time provided by CURLOPT_TIMEVAL
381 */
382bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
383{
384 if((timeofdoc == 0) || (data->set.timevalue == 0))
385 return TRUE;
386
387 switch(data->set.timecondition) {
388 case CURL_TIMECOND_IFMODSINCE:
389 default:
390 if(timeofdoc <= data->set.timevalue) {
391 infof(data,
392 "The requested document is not new enough");
393 data->info.timecond = TRUE;
394 return FALSE;
395 }
396 break;
397 case CURL_TIMECOND_IFUNMODSINCE:
398 if(timeofdoc >= data->set.timevalue) {
399 infof(data,
400 "The requested document is not old enough");
401 data->info.timecond = TRUE;
402 return FALSE;
403 }
404 break;
405 }
406
407 return TRUE;
408}
409
410/*
411 * Go ahead and do a read if we have a readable socket or if
412 * the stream was rewound (in which case we have data in a
413 * buffer)
414 *
415 * return '*comeback' TRUE if we didn't properly drain the socket so this
416 * function should get called again without select() or similar in between!
417 */
418static CURLcode readwrite_data(struct Curl_easy *data,
419 struct connectdata *conn,
420 struct SingleRequest *k,
421 int *didwhat, bool *done,
422 bool *comeback)
423{
424 CURLcode result = CURLE_OK;
425 ssize_t nread; /* number of bytes read */
426 size_t excess = 0; /* excess bytes read */
427 bool readmore = FALSE; /* used by RTP to signal for more data */
428 int maxloops = 100;
429 curl_off_t max_recv = data->set.max_recv_speed?
430 data->set.max_recv_speed : CURL_OFF_T_MAX;
431 char *buf = data->state.buffer;
432 bool data_eof_handled = FALSE;
433 DEBUGASSERT(buf);
434
435 *done = FALSE;
436 *comeback = FALSE;
437
438 /* This is where we loop until we have read everything there is to
439 read or we get a CURLE_AGAIN */
440 do {
441 bool is_empty_data = FALSE;
442 size_t buffersize = data->set.buffer_size;
443 size_t bytestoread = buffersize;
444 /* For HTTP/2 and HTTP/3, read data without caring about the content
445 length. This is safe because body in HTTP/2 is always segmented
446 thanks to its framing layer. Meanwhile, we have to call Curl_read
447 to ensure that http2_handle_stream_close is called when we read all
448 incoming bytes for a particular stream. */
449 bool is_http3 = Curl_conn_is_http3(data, conn, FIRSTSOCKET);
450 data_eof_handled = is_http3 || Curl_conn_is_http2(data, conn, FIRSTSOCKET);
451
452 if(!data_eof_handled && k->size != -1 && !k->header) {
453 /* make sure we don't read too much */
454 curl_off_t totalleft = k->size - k->bytecount;
455 if(totalleft < (curl_off_t)bytestoread)
456 bytestoread = (size_t)totalleft;
457 }
458
459 if(bytestoread) {
460 /* receive data from the network! */
461 result = Curl_read(data, sockfd: conn->sockfd, buf, buffersize: bytestoread, n: &nread);
462
463 /* read would've blocked */
464 if(CURLE_AGAIN == result) {
465 result = CURLE_OK;
466 break; /* get out of loop */
467 }
468
469 if(result>0)
470 goto out;
471 }
472 else {
473 /* read nothing but since we wanted nothing we consider this an OK
474 situation to proceed from */
475 DEBUGF(infof(data, "readwrite_data: we're done"));
476 nread = 0;
477 }
478
479 if(!k->bytecount) {
480 Curl_pgrsTime(data, timer: TIMER_STARTTRANSFER);
481 if(k->exp100 > EXP100_SEND_DATA)
482 /* set time stamp to compare with when waiting for the 100 */
483 k->start100 = Curl_now();
484 }
485
486 *didwhat |= KEEP_RECV;
487 /* indicates data of zero size, i.e. empty file */
488 is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
489
490 if(0 < nread || is_empty_data) {
491 buf[nread] = 0;
492 }
493 if(!nread) {
494 /* if we receive 0 or less here, either the data transfer is done or the
495 server closed the connection and we bail out from this! */
496 if(data_eof_handled)
497 DEBUGF(infof(data, "nread == 0, stream closed, bailing"));
498 else
499 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing"));
500 k->keepon = 0; /* stop sending as well */
501 if(!is_empty_data)
502 break;
503 }
504
505 /* Default buffer to use when we write the buffer, it may be changed
506 in the flow below before the actual storing is done. */
507 k->str = buf;
508
509 if(conn->handler->readwrite) {
510 result = conn->handler->readwrite(data, conn, &nread, &readmore);
511 if(result)
512 goto out;
513 if(readmore)
514 break;
515 }
516
517#ifndef CURL_DISABLE_HTTP
518 /* Since this is a two-state thing, we check if we are parsing
519 headers at the moment or not. */
520 if(k->header) {
521 /* we are in parse-the-header-mode */
522 bool stop_reading = FALSE;
523 result = Curl_http_readwrite_headers(data, conn, nread: &nread, stop_reading: &stop_reading);
524 if(result)
525 goto out;
526
527 if(conn->handler->readwrite &&
528 (k->maxdownload <= 0 && nread > 0)) {
529 result = conn->handler->readwrite(data, conn, &nread, &readmore);
530 if(result)
531 goto out;
532 if(readmore)
533 break;
534 }
535
536 if(stop_reading) {
537 /* We've stopped dealing with input, get out of the do-while loop */
538
539 if(nread > 0) {
540 infof(data,
541 "Excess found:"
542 " excess = %zd"
543 " url = %s (zero-length body)",
544 nread, data->state.up.path);
545 }
546
547 break;
548 }
549 }
550#endif /* CURL_DISABLE_HTTP */
551
552
553 /* This is not an 'else if' since it may be a rest from the header
554 parsing, where the beginning of the buffer is headers and the end
555 is non-headers. */
556 if(!k->header && (nread > 0 || is_empty_data)) {
557
558 if(data->req.no_body) {
559 /* data arrives although we want none, bail out */
560 streamclose(conn, "ignoring body");
561 *done = TRUE;
562 result = CURLE_WEIRD_SERVER_REPLY;
563 goto out;
564 }
565
566#ifndef CURL_DISABLE_HTTP
567 if(0 == k->bodywrites && !is_empty_data) {
568 /* These checks are only made the first time we are about to
569 write a piece of the body */
570 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
571 /* HTTP-only checks */
572 result = Curl_http_firstwrite(data, conn, done);
573 if(result || *done)
574 goto out;
575 }
576 } /* this is the first time we write a body part */
577#endif /* CURL_DISABLE_HTTP */
578
579 k->bodywrites++;
580
581 /* pass data to the debug function before it gets "dechunked" */
582 if(data->set.verbose) {
583 if(k->badheader) {
584 Curl_debug(data, type: CURLINFO_DATA_IN,
585 ptr: Curl_dyn_ptr(s: &data->state.headerb),
586 size: Curl_dyn_len(s: &data->state.headerb));
587 if(k->badheader == HEADER_PARTHEADER)
588 Curl_debug(data, type: CURLINFO_DATA_IN,
589 ptr: k->str, size: (size_t)nread);
590 }
591 else
592 Curl_debug(data, type: CURLINFO_DATA_IN,
593 ptr: k->str, size: (size_t)nread);
594 }
595
596#ifndef CURL_DISABLE_HTTP
597 if(k->chunk) {
598 /*
599 * Here comes a chunked transfer flying and we need to decode this
600 * properly. While the name says read, this function both reads
601 * and writes away the data. The returned 'nread' holds the number
602 * of actual data it wrote to the client.
603 */
604 CURLcode extra;
605 CHUNKcode res =
606 Curl_httpchunk_read(data, datap: k->str, length: nread, wrote: &nread, passthru: &extra);
607
608 if(CHUNKE_OK < res) {
609 if(CHUNKE_PASSTHRU_ERROR == res) {
610 failf(data, fmt: "Failed reading the chunked-encoded stream");
611 result = extra;
612 goto out;
613 }
614 failf(data, fmt: "%s in chunked-encoding", Curl_chunked_strerror(code: res));
615 result = CURLE_RECV_ERROR;
616 goto out;
617 }
618 if(CHUNKE_STOP == res) {
619 /* we're done reading chunks! */
620 k->keepon &= ~KEEP_RECV; /* read no more */
621
622 /* N number of bytes at the end of the str buffer that weren't
623 written to the client. */
624 if(conn->chunk.datasize) {
625 infof(data, "Leftovers after chunking: % "
626 CURL_FORMAT_CURL_OFF_T "u bytes",
627 conn->chunk.datasize);
628 }
629 }
630 /* If it returned OK, we just keep going */
631 }
632#endif /* CURL_DISABLE_HTTP */
633
634 /* Account for body content stored in the header buffer */
635 if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
636 size_t headlen = Curl_dyn_len(s: &data->state.headerb);
637 DEBUGF(infof(data, "Increasing bytecount by %zu", headlen));
638 k->bytecount += headlen;
639 }
640
641 if((-1 != k->maxdownload) &&
642 (k->bytecount + nread >= k->maxdownload)) {
643
644 excess = (size_t)(k->bytecount + nread - k->maxdownload);
645 if(excess > 0 && !k->ignorebody) {
646 infof(data,
647 "Excess found in a read:"
648 " excess = %zu"
649 ", size = %" CURL_FORMAT_CURL_OFF_T
650 ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
651 ", bytecount = %" CURL_FORMAT_CURL_OFF_T,
652 excess, k->size, k->maxdownload, k->bytecount);
653 connclose(conn, "excess found in a read");
654 }
655
656 nread = (ssize_t) (k->maxdownload - k->bytecount);
657 if(nread < 0) /* this should be unusual */
658 nread = 0;
659
660 /* HTTP/3 over QUIC should keep reading until QUIC connection
661 is closed. In contrast to HTTP/2 which can stop reading
662 from TCP connection, HTTP/3 over QUIC needs ACK from server
663 to ensure stream closure. It should keep reading. */
664 if(!is_http3) {
665 k->keepon &= ~KEEP_RECV; /* we're done reading */
666 }
667 }
668
669 k->bytecount += nread;
670 max_recv -= nread;
671
672 result = Curl_pgrsSetDownloadCounter(data, size: k->bytecount);
673 if(result)
674 goto out;
675
676 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
677 /* If this is chunky transfer, it was already written */
678
679 if(k->badheader && !k->ignorebody) {
680 /* we parsed a piece of data wrongly assuming it was a header
681 and now we output it as body instead */
682 size_t headlen = Curl_dyn_len(s: &data->state.headerb);
683
684 /* Don't let excess data pollute body writes */
685 if(k->maxdownload == -1 || (curl_off_t)headlen <= k->maxdownload)
686 result = Curl_client_write(data, CLIENTWRITE_BODY,
687 ptr: Curl_dyn_ptr(s: &data->state.headerb),
688 len: headlen);
689 else
690 result = Curl_client_write(data, CLIENTWRITE_BODY,
691 ptr: Curl_dyn_ptr(s: &data->state.headerb),
692 len: (size_t)k->maxdownload);
693
694 if(result)
695 goto out;
696 }
697 if(k->badheader < HEADER_ALLBAD) {
698 /* This switch handles various content encodings. If there's an
699 error here, be sure to check over the almost identical code
700 in http_chunks.c.
701 Make sure that ALL_CONTENT_ENCODINGS contains all the
702 encodings handled here. */
703 if(!k->ignorebody && nread) {
704#ifndef CURL_DISABLE_POP3
705 if(conn->handler->protocol & PROTO_FAMILY_POP3)
706 result = Curl_pop3_write(data, k->str, nread);
707 else
708#endif /* CURL_DISABLE_POP3 */
709 result = Curl_client_write(data, CLIENTWRITE_BODY, ptr: k->str,
710 len: nread);
711 }
712 }
713 k->badheader = HEADER_NORMAL; /* taken care of now */
714
715 if(result)
716 goto out;
717 }
718
719 } /* if(!header and data to read) */
720
721 if(conn->handler->readwrite && excess) {
722 /* Parse the excess data */
723 k->str += nread;
724
725 if(&k->str[excess] > &buf[data->set.buffer_size]) {
726 /* the excess amount was too excessive(!), make sure
727 it doesn't read out of buffer */
728 excess = &buf[data->set.buffer_size] - k->str;
729 }
730 nread = (ssize_t)excess;
731
732 result = conn->handler->readwrite(data, conn, &nread, &readmore);
733 if(result)
734 goto out;
735
736 if(readmore)
737 k->keepon |= KEEP_RECV; /* we're not done reading */
738 break;
739 }
740
741 if(is_empty_data) {
742 /* if we received nothing, the server closed the connection and we
743 are done */
744 k->keepon &= ~KEEP_RECV;
745 }
746
747 if((k->keepon & KEEP_RECV_PAUSE) || !(k->keepon & KEEP_RECV)) {
748 /* this is a paused or stopped transfer */
749 break;
750 }
751
752 } while((max_recv > 0) && data_pending(data) && maxloops--);
753
754 if(maxloops <= 0 || max_recv <= 0) {
755 /* we mark it as read-again-please */
756 data->state.dselect_bits = CURL_CSELECT_IN;
757 *comeback = TRUE;
758 }
759
760 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
761 (conn->bits.close || data_eof_handled)) {
762 /* When we've read the entire thing and the close bit is set, the server
763 may now close the connection. If there's now any kind of sending going
764 on from our side, we need to stop that immediately. */
765 infof(data, "we are done reading and this is set to close, stop send");
766 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
767 }
768
769out:
770 if(result)
771 DEBUGF(infof(data, "readwrite_data() -> %d", result));
772 return result;
773}
774
775CURLcode Curl_done_sending(struct Curl_easy *data,
776 struct SingleRequest *k)
777{
778 k->keepon &= ~KEEP_SEND; /* we're done writing */
779
780 /* These functions should be moved into the handler struct! */
781 Curl_conn_ev_data_done_send(data);
782
783 return CURLE_OK;
784}
785
786#if defined(WIN32) && defined(USE_WINSOCK)
787#ifndef SIO_IDEAL_SEND_BACKLOG_QUERY
788#define SIO_IDEAL_SEND_BACKLOG_QUERY 0x4004747B
789#endif
790
791static void win_update_buffer_size(curl_socket_t sockfd)
792{
793 int result;
794 ULONG ideal;
795 DWORD ideallen;
796 result = WSAIoctl(sockfd, SIO_IDEAL_SEND_BACKLOG_QUERY, 0, 0,
797 &ideal, sizeof(ideal), &ideallen, 0, 0);
798 if(result == 0) {
799 setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF,
800 (const char *)&ideal, sizeof(ideal));
801 }
802}
803#else
804#define win_update_buffer_size(x)
805#endif
806
807#define curl_upload_refill_watermark(data) \
808 ((ssize_t)((data)->set.upload_buffer_size >> 5))
809
810/*
811 * Send data to upload to the server, when the socket is writable.
812 */
813static CURLcode readwrite_upload(struct Curl_easy *data,
814 struct connectdata *conn,
815 int *didwhat)
816{
817 ssize_t i, si;
818 ssize_t bytes_written;
819 CURLcode result;
820 ssize_t nread; /* number of bytes read */
821 bool sending_http_headers = FALSE;
822 struct SingleRequest *k = &data->req;
823
824 *didwhat |= KEEP_SEND;
825
826 do {
827 curl_off_t nbody;
828 ssize_t offset = 0;
829
830 if(0 != k->upload_present &&
831 k->upload_present < curl_upload_refill_watermark(data) &&
832 !k->upload_chunky &&/*(variable sized chunked header; append not safe)*/
833 !k->upload_done && /*!(k->upload_done once k->upload_present sent)*/
834 !(k->writebytecount + k->upload_present - k->pendingheader ==
835 data->state.infilesize)) {
836 offset = k->upload_present;
837 }
838
839 /* only read more data if there's no upload data already
840 present in the upload buffer, or if appending to upload buffer */
841 if(0 == k->upload_present || offset) {
842 result = Curl_get_upload_buffer(data);
843 if(result)
844 return result;
845 if(offset && k->upload_fromhere != data->state.ulbuf)
846 memmove(dest: data->state.ulbuf, src: k->upload_fromhere, n: offset);
847 /* init the "upload from here" pointer */
848 k->upload_fromhere = data->state.ulbuf;
849
850 if(!k->upload_done) {
851 /* HTTP pollution, this should be written nicer to become more
852 protocol agnostic. */
853 size_t fillcount;
854 struct HTTP *http = k->p.http;
855
856 if((k->exp100 == EXP100_SENDING_REQUEST) &&
857 (http->sending == HTTPSEND_BODY)) {
858 /* If this call is to send body data, we must take some action:
859 We have sent off the full HTTP 1.1 request, and we shall now
860 go into the Expect: 100 state and await such a header */
861 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
862 k->keepon &= ~KEEP_SEND; /* disable writing */
863 k->start100 = Curl_now(); /* timeout count starts now */
864 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
865 /* set a timeout for the multi interface */
866 Curl_expire(data, milli: data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
867 break;
868 }
869
870 if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
871 if(http->sending == HTTPSEND_REQUEST)
872 /* We're sending the HTTP request headers, not the data.
873 Remember that so we don't change the line endings. */
874 sending_http_headers = TRUE;
875 else
876 sending_http_headers = FALSE;
877 }
878
879 k->upload_fromhere += offset;
880 result = Curl_fillreadbuffer(data, bytes: data->set.upload_buffer_size-offset,
881 nreadp: &fillcount);
882 k->upload_fromhere -= offset;
883 if(result)
884 return result;
885
886 nread = offset + fillcount;
887 }
888 else
889 nread = 0; /* we're done uploading/reading */
890
891 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
892 /* this is a paused transfer */
893 break;
894 }
895 if(nread <= 0) {
896 result = Curl_done_sending(data, k);
897 if(result)
898 return result;
899 break;
900 }
901
902 /* store number of bytes available for upload */
903 k->upload_present = nread;
904
905 /* convert LF to CRLF if so asked */
906 if((!sending_http_headers) && (
907#ifdef CURL_DO_LINEEND_CONV
908 /* always convert if we're FTPing in ASCII mode */
909 (data->state.prefer_ascii) ||
910#endif
911 (data->set.crlf))) {
912 /* Do we need to allocate a scratch buffer? */
913 if(!data->state.scratch) {
914 data->state.scratch = malloc(2 * data->set.upload_buffer_size);
915 if(!data->state.scratch) {
916 failf(data, fmt: "Failed to alloc scratch buffer");
917
918 return CURLE_OUT_OF_MEMORY;
919 }
920 }
921
922 /*
923 * ASCII/EBCDIC Note: This is presumably a text (not binary)
924 * transfer so the data should already be in ASCII.
925 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
926 * must be used instead of the escape sequences \r & \n.
927 */
928 if(offset)
929 memcpy(dest: data->state.scratch, src: k->upload_fromhere, n: offset);
930 for(i = offset, si = offset; i < nread; i++, si++) {
931 if(k->upload_fromhere[i] == 0x0a) {
932 data->state.scratch[si++] = 0x0d;
933 data->state.scratch[si] = 0x0a;
934 if(!data->set.crlf) {
935 /* we're here only because FTP is in ASCII mode...
936 bump infilesize for the LF we just added */
937 if(data->state.infilesize != -1)
938 data->state.infilesize++;
939 }
940 }
941 else
942 data->state.scratch[si] = k->upload_fromhere[i];
943 }
944
945 if(si != nread) {
946 /* only perform the special operation if we really did replace
947 anything */
948 nread = si;
949
950 /* upload from the new (replaced) buffer instead */
951 k->upload_fromhere = data->state.scratch;
952
953 /* set the new amount too */
954 k->upload_present = nread;
955 }
956 }
957
958#ifndef CURL_DISABLE_SMTP
959 if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
960 result = Curl_smtp_escape_eob(data, nread, offset);
961 if(result)
962 return result;
963 }
964#endif /* CURL_DISABLE_SMTP */
965 } /* if 0 == k->upload_present or appended to upload buffer */
966 else {
967 /* We have a partial buffer left from a previous "round". Use
968 that instead of reading more data */
969 }
970
971 /* write to socket (send away data) */
972 result = Curl_write(data,
973 sockfd: conn->writesockfd, /* socket to send to */
974 mem: k->upload_fromhere, /* buffer pointer */
975 len: k->upload_present, /* buffer size */
976 written: &bytes_written); /* actually sent */
977 if(result)
978 return result;
979
980#if defined(WIN32) && defined(USE_WINSOCK)
981 {
982 struct curltime n = Curl_now();
983 if(Curl_timediff(n, k->last_sndbuf_update) > 1000) {
984 win_update_buffer_size(conn->writesockfd);
985 k->last_sndbuf_update = n;
986 }
987 }
988#endif
989
990 if(k->pendingheader) {
991 /* parts of what was sent was header */
992 curl_off_t n = CURLMIN(k->pendingheader, bytes_written);
993 /* show the data before we change the pointer upload_fromhere */
994 Curl_debug(data, type: CURLINFO_HEADER_OUT, ptr: k->upload_fromhere, size: (size_t)n);
995 k->pendingheader -= n;
996 nbody = bytes_written - n; /* size of the written body part */
997 }
998 else
999 nbody = bytes_written;
1000
1001 if(nbody) {
1002 /* show the data before we change the pointer upload_fromhere */
1003 Curl_debug(data, type: CURLINFO_DATA_OUT,
1004 ptr: &k->upload_fromhere[bytes_written - nbody],
1005 size: (size_t)nbody);
1006
1007 k->writebytecount += nbody;
1008 Curl_pgrsSetUploadCounter(data, size: k->writebytecount);
1009 }
1010
1011 if((!k->upload_chunky || k->forbidchunk) &&
1012 (k->writebytecount == data->state.infilesize)) {
1013 /* we have sent all data we were supposed to */
1014 k->upload_done = TRUE;
1015 infof(data, "We are completely uploaded and fine");
1016 }
1017
1018 if(k->upload_present != bytes_written) {
1019 /* we only wrote a part of the buffer (if anything), deal with it! */
1020
1021 /* store the amount of bytes left in the buffer to write */
1022 k->upload_present -= bytes_written;
1023
1024 /* advance the pointer where to find the buffer when the next send
1025 is to happen */
1026 k->upload_fromhere += bytes_written;
1027 }
1028 else {
1029 /* we've uploaded that buffer now */
1030 result = Curl_get_upload_buffer(data);
1031 if(result)
1032 return result;
1033 k->upload_fromhere = data->state.ulbuf;
1034 k->upload_present = 0; /* no more bytes left */
1035
1036 if(k->upload_done) {
1037 result = Curl_done_sending(data, k);
1038 if(result)
1039 return result;
1040 }
1041 }
1042
1043
1044 } while(0); /* just to break out from! */
1045
1046 return CURLE_OK;
1047}
1048
1049static int select_bits_paused(struct Curl_easy *data, int select_bits)
1050{
1051 /* See issue #11982: we really need to be careful not to progress
1052 * a transfer direction when that direction is paused. Not all parts
1053 * of our state machine are handling PAUSED transfers correctly. So, we
1054 * do not want to go there.
1055 * NOTE: we are only interested in PAUSE, not HOLD. */
1056 return (((select_bits & CURL_CSELECT_IN) &&
1057 (data->req.keepon & KEEP_RECV_PAUSE)) ||
1058 ((select_bits & CURL_CSELECT_OUT) &&
1059 (data->req.keepon & KEEP_SEND_PAUSE)));
1060}
1061
1062/*
1063 * Curl_readwrite() is the low-level function to be called when data is to
1064 * be read and written to/from the connection.
1065 *
1066 * return '*comeback' TRUE if we didn't properly drain the socket so this
1067 * function should get called again without select() or similar in between!
1068 */
1069CURLcode Curl_readwrite(struct connectdata *conn,
1070 struct Curl_easy *data,
1071 bool *done,
1072 bool *comeback)
1073{
1074 struct SingleRequest *k = &data->req;
1075 CURLcode result;
1076 struct curltime now;
1077 int didwhat = 0;
1078 int select_bits;
1079
1080 if(data->state.dselect_bits) {
1081 if(select_bits_paused(data, select_bits: data->state.dselect_bits)) {
1082 /* leave the bits unchanged, so they'll tell us what to do when
1083 * this transfer gets unpaused. */
1084 DEBUGF(infof(data, "readwrite, dselect_bits, early return on PAUSED"));
1085 result = CURLE_OK;
1086 goto out;
1087 }
1088 select_bits = data->state.dselect_bits;
1089 data->state.dselect_bits = 0;
1090 }
1091 else if(conn->cselect_bits) {
1092 /* CAVEAT: adding `select_bits_paused()` check here makes test640 hang
1093 * (among others). Which hints at strange state handling in FTP land... */
1094 select_bits = conn->cselect_bits;
1095 conn->cselect_bits = 0;
1096 }
1097 else {
1098 curl_socket_t fd_read;
1099 curl_socket_t fd_write;
1100 /* only use the proper socket if the *_HOLD bit is not set simultaneously
1101 as then we are in rate limiting state in that transfer direction */
1102 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1103 fd_read = conn->sockfd;
1104 else
1105 fd_read = CURL_SOCKET_BAD;
1106
1107 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1108 fd_write = conn->writesockfd;
1109 else
1110 fd_write = CURL_SOCKET_BAD;
1111
1112 select_bits = Curl_socket_check(readfd: fd_read, CURL_SOCKET_BAD, writefd: fd_write, timeout_ms: 0);
1113 }
1114
1115 if(select_bits == CURL_CSELECT_ERR) {
1116 failf(data, fmt: "select/poll returned error");
1117 result = CURLE_SEND_ERROR;
1118 goto out;
1119 }
1120
1121#ifdef USE_HYPER
1122 if(conn->datastream) {
1123 result = conn->datastream(data, conn, &didwhat, done, select_bits);
1124 if(result || *done)
1125 goto out;
1126 }
1127 else {
1128#endif
1129 /* We go ahead and do a read if we have a readable socket or if
1130 the stream was rewound (in which case we have data in a
1131 buffer) */
1132 if((k->keepon & KEEP_RECV) && (select_bits & CURL_CSELECT_IN)) {
1133 result = readwrite_data(data, conn, k, didwhat: &didwhat, done, comeback);
1134 if(result || *done)
1135 goto out;
1136 }
1137
1138 /* If we still have writing to do, we check if we have a writable socket. */
1139 if((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) {
1140 /* write */
1141
1142 result = readwrite_upload(data, conn, didwhat: &didwhat);
1143 if(result)
1144 goto out;
1145 }
1146#ifdef USE_HYPER
1147 }
1148#endif
1149
1150 now = Curl_now();
1151 if(!didwhat) {
1152 /* no read no write, this is a timeout? */
1153 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1154 /* This should allow some time for the header to arrive, but only a
1155 very short time as otherwise it'll be too much wasted time too
1156 often. */
1157
1158 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1159
1160 Therefore, when a client sends this header field to an origin server
1161 (possibly via a proxy) from which it has never seen a 100 (Continue)
1162 status, the client SHOULD NOT wait for an indefinite period before
1163 sending the request body.
1164
1165 */
1166
1167 timediff_t ms = Curl_timediff(newer: now, older: k->start100);
1168 if(ms >= data->set.expect_100_timeout) {
1169 /* we've waited long enough, continue anyway */
1170 k->exp100 = EXP100_SEND_DATA;
1171 k->keepon |= KEEP_SEND;
1172 Curl_expire_done(data, id: EXPIRE_100_TIMEOUT);
1173 infof(data, "Done waiting for 100-continue");
1174 }
1175 }
1176
1177 result = Curl_conn_ev_data_idle(data);
1178 if(result)
1179 goto out;
1180 }
1181
1182 if(Curl_pgrsUpdate(data))
1183 result = CURLE_ABORTED_BY_CALLBACK;
1184 else
1185 result = Curl_speedcheck(data, now);
1186 if(result)
1187 goto out;
1188
1189 if(k->keepon) {
1190 if(0 > Curl_timeleft(data, nowp: &now, FALSE)) {
1191 if(k->size != -1) {
1192 failf(data, fmt: "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1193 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " out of %"
1194 CURL_FORMAT_CURL_OFF_T " bytes received",
1195 Curl_timediff(newer: now, older: data->progress.t_startsingle),
1196 k->bytecount, k->size);
1197 }
1198 else {
1199 failf(data, fmt: "Operation timed out after %" CURL_FORMAT_TIMEDIFF_T
1200 " milliseconds with %" CURL_FORMAT_CURL_OFF_T " bytes received",
1201 Curl_timediff(newer: now, older: data->progress.t_startsingle),
1202 k->bytecount);
1203 }
1204 result = CURLE_OPERATION_TIMEDOUT;
1205 goto out;
1206 }
1207 }
1208 else {
1209 /*
1210 * The transfer has been performed. Just make some general checks before
1211 * returning.
1212 */
1213
1214 if(!(data->req.no_body) && (k->size != -1) &&
1215 (k->bytecount != k->size) &&
1216#ifdef CURL_DO_LINEEND_CONV
1217 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1218 so we'll check to see if the discrepancy can be explained
1219 by the number of CRLFs we've changed to LFs.
1220 */
1221 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1222#endif /* CURL_DO_LINEEND_CONV */
1223 !k->newurl) {
1224 failf(data, fmt: "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1225 " bytes remaining to read", k->size - k->bytecount);
1226 result = CURLE_PARTIAL_FILE;
1227 goto out;
1228 }
1229 if(!(data->req.no_body) && k->chunk &&
1230 (conn->chunk.state != CHUNK_STOP)) {
1231 /*
1232 * In chunked mode, return an error if the connection is closed prior to
1233 * the empty (terminating) chunk is read.
1234 *
1235 * The condition above used to check for
1236 * conn->proto.http->chunk.datasize != 0 which is true after reading
1237 * *any* chunk, not just the empty chunk.
1238 *
1239 */
1240 failf(data, fmt: "transfer closed with outstanding read data remaining");
1241 result = CURLE_PARTIAL_FILE;
1242 goto out;
1243 }
1244 if(Curl_pgrsUpdate(data)) {
1245 result = CURLE_ABORTED_BY_CALLBACK;
1246 goto out;
1247 }
1248 }
1249
1250 /* Now update the "done" boolean we return */
1251 *done = (0 == (k->keepon&(KEEP_RECVBITS|KEEP_SENDBITS))) ? TRUE : FALSE;
1252out:
1253 if(result)
1254 DEBUGF(infof(data, "Curl_readwrite() -> %d", result));
1255 return result;
1256}
1257
1258/*
1259 * Curl_single_getsock() gets called by the multi interface code when the app
1260 * has requested to get the sockets for the current connection. This function
1261 * will then be called once for every connection that the multi interface
1262 * keeps track of. This function will only be called for connections that are
1263 * in the proper state to have this information available.
1264 */
1265int Curl_single_getsock(struct Curl_easy *data,
1266 struct connectdata *conn,
1267 curl_socket_t *sock)
1268{
1269 int bitmap = GETSOCK_BLANK;
1270 unsigned sockindex = 0;
1271
1272 if(conn->handler->perform_getsock)
1273 return conn->handler->perform_getsock(data, conn, sock);
1274
1275 /* don't include HOLD and PAUSE connections */
1276 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1277
1278 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1279
1280 bitmap |= GETSOCK_READSOCK(sockindex);
1281 sock[sockindex] = conn->sockfd;
1282 }
1283
1284 /* don't include HOLD and PAUSE connections */
1285 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1286 if((conn->sockfd != conn->writesockfd) ||
1287 bitmap == GETSOCK_BLANK) {
1288 /* only if they are not the same socket and we have a readable
1289 one, we increase index */
1290 if(bitmap != GETSOCK_BLANK)
1291 sockindex++; /* increase index if we need two entries */
1292
1293 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1294
1295 sock[sockindex] = conn->writesockfd;
1296 }
1297
1298 bitmap |= GETSOCK_WRITESOCK(sockindex);
1299 }
1300
1301 return bitmap;
1302}
1303
1304/* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1305 which means this gets called once for each subsequent redirect etc */
1306void Curl_init_CONNECT(struct Curl_easy *data)
1307{
1308 data->state.fread_func = data->set.fread_func_set;
1309 data->state.in = data->set.in_set;
1310 data->state.upload = (data->state.httpreq == HTTPREQ_PUT);
1311}
1312
1313/*
1314 * Curl_pretransfer() is called immediately before a transfer starts, and only
1315 * once for one transfer no matter if it has redirects or do multi-pass
1316 * authentication etc.
1317 */
1318CURLcode Curl_pretransfer(struct Curl_easy *data)
1319{
1320 CURLcode result;
1321
1322 if(!data->state.url && !data->set.uh) {
1323 /* we can't do anything without URL */
1324 failf(data, fmt: "No URL set");
1325 return CURLE_URL_MALFORMAT;
1326 }
1327
1328 /* since the URL may have been redirected in a previous use of this handle */
1329 if(data->state.url_alloc) {
1330 /* the already set URL is allocated, free it first! */
1331 Curl_safefree(data->state.url);
1332 data->state.url_alloc = FALSE;
1333 }
1334
1335 if(!data->state.url && data->set.uh) {
1336 CURLUcode uc;
1337 free(data->set.str[STRING_SET_URL]);
1338 uc = curl_url_get(handle: data->set.uh,
1339 what: CURLUPART_URL, part: &data->set.str[STRING_SET_URL], flags: 0);
1340 if(uc) {
1341 failf(data, fmt: "No URL set");
1342 return CURLE_URL_MALFORMAT;
1343 }
1344 }
1345
1346 if(data->set.postfields && data->set.set_resume_from) {
1347 /* we can't */
1348 failf(data, fmt: "cannot mix POSTFIELDS with RESUME_FROM");
1349 return CURLE_BAD_FUNCTION_ARGUMENT;
1350 }
1351
1352 data->state.prefer_ascii = data->set.prefer_ascii;
1353#ifdef CURL_LIST_ONLY_PROTOCOL
1354 data->state.list_only = data->set.list_only;
1355#endif
1356 data->state.httpreq = data->set.method;
1357 data->state.url = data->set.str[STRING_SET_URL];
1358
1359 /* Init the SSL session ID cache here. We do it here since we want to do it
1360 after the *_setopt() calls (that could specify the size of the cache) but
1361 before any transfer takes place. */
1362 result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1363 if(result)
1364 return result;
1365
1366 data->state.requests = 0;
1367 data->state.followlocation = 0; /* reset the location-follow counter */
1368 data->state.this_is_a_follow = FALSE; /* reset this */
1369 data->state.errorbuf = FALSE; /* no error has occurred */
1370 data->state.httpwant = data->set.httpwant;
1371 data->state.httpversion = 0;
1372 data->state.authproblem = FALSE;
1373 data->state.authhost.want = data->set.httpauth;
1374 data->state.authproxy.want = data->set.proxyauth;
1375 Curl_safefree(data->info.wouldredirect);
1376 Curl_data_priority_clear_state(data);
1377
1378 if(data->state.httpreq == HTTPREQ_PUT)
1379 data->state.infilesize = data->set.filesize;
1380 else if((data->state.httpreq != HTTPREQ_GET) &&
1381 (data->state.httpreq != HTTPREQ_HEAD)) {
1382 data->state.infilesize = data->set.postfieldsize;
1383 if(data->set.postfields && (data->state.infilesize == -1))
1384 data->state.infilesize = (curl_off_t)strlen(s: data->set.postfields);
1385 }
1386 else
1387 data->state.infilesize = 0;
1388
1389 /* If there is a list of cookie files to read, do it now! */
1390 Curl_cookie_loadfiles(data);
1391
1392 /* If there is a list of host pairs to deal with */
1393 if(data->state.resolve)
1394 result = Curl_loadhostpairs(data);
1395
1396 /* If there is a list of hsts files to read */
1397 Curl_hsts_loadfiles(data);
1398
1399 if(!result) {
1400 /* Allow data->set.use_port to set which port to use. This needs to be
1401 * disabled for example when we follow Location: headers to URLs using
1402 * different ports! */
1403 data->state.allow_port = TRUE;
1404
1405#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1406 /*************************************************************
1407 * Tell signal handler to ignore SIGPIPE
1408 *************************************************************/
1409 if(!data->set.no_signal)
1410 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1411#endif
1412
1413 Curl_initinfo(data); /* reset session-specific information "variables" */
1414 Curl_pgrsResetTransferSizes(data);
1415 Curl_pgrsStartNow(data);
1416
1417 /* In case the handle is reused and an authentication method was picked
1418 in the session we need to make sure we only use the one(s) we now
1419 consider to be fine */
1420 data->state.authhost.picked &= data->state.authhost.want;
1421 data->state.authproxy.picked &= data->state.authproxy.want;
1422
1423#ifndef CURL_DISABLE_FTP
1424 data->state.wildcardmatch = data->set.wildcard_enabled;
1425 if(data->state.wildcardmatch) {
1426 struct WildcardData *wc;
1427 if(!data->wildcard) {
1428 data->wildcard = calloc(1, sizeof(struct WildcardData));
1429 if(!data->wildcard)
1430 return CURLE_OUT_OF_MEMORY;
1431 }
1432 wc = data->wildcard;
1433 if((wc->state < CURLWC_INIT) ||
1434 (wc->state >= CURLWC_CLEAN)) {
1435 if(wc->ftpwc)
1436 wc->dtor(wc->ftpwc);
1437 Curl_safefree(wc->pattern);
1438 Curl_safefree(wc->path);
1439 result = Curl_wildcard_init(wc); /* init wildcard structures */
1440 if(result)
1441 return CURLE_OUT_OF_MEMORY;
1442 }
1443 }
1444#endif
1445 result = Curl_hsts_loadcb(data, h: data->hsts);
1446 }
1447
1448 /*
1449 * Set user-agent. Used for HTTP, but since we can attempt to tunnel
1450 * basically anything through an HTTP proxy we can't limit this based on
1451 * protocol.
1452 */
1453 if(data->set.str[STRING_USERAGENT]) {
1454 Curl_safefree(data->state.aptr.uagent);
1455 data->state.aptr.uagent =
1456 aprintf(format: "User-Agent: %s\r\n", data->set.str[STRING_USERAGENT]);
1457 if(!data->state.aptr.uagent)
1458 return CURLE_OUT_OF_MEMORY;
1459 }
1460
1461 if(!result)
1462 result = Curl_setstropt(charp: &data->state.aptr.user,
1463 s: data->set.str[STRING_USERNAME]);
1464 if(!result)
1465 result = Curl_setstropt(charp: &data->state.aptr.passwd,
1466 s: data->set.str[STRING_PASSWORD]);
1467 if(!result)
1468 result = Curl_setstropt(charp: &data->state.aptr.proxyuser,
1469 s: data->set.str[STRING_PROXYUSERNAME]);
1470 if(!result)
1471 result = Curl_setstropt(charp: &data->state.aptr.proxypasswd,
1472 s: data->set.str[STRING_PROXYPASSWORD]);
1473
1474 data->req.headerbytecount = 0;
1475 Curl_headers_cleanup(data);
1476 return result;
1477}
1478
1479/*
1480 * Curl_posttransfer() is called immediately after a transfer ends
1481 */
1482CURLcode Curl_posttransfer(struct Curl_easy *data)
1483{
1484#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1485 /* restore the signal handler for SIGPIPE before we get back */
1486 if(!data->set.no_signal)
1487 signal(SIGPIPE, data->state.prev_signal);
1488#else
1489 (void)data; /* unused parameter */
1490#endif
1491
1492 return CURLE_OK;
1493}
1494
1495/*
1496 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1497 * as given by the remote server and set up the new URL to request.
1498 *
1499 * This function DOES NOT FREE the given url.
1500 */
1501CURLcode Curl_follow(struct Curl_easy *data,
1502 char *newurl, /* the Location: string */
1503 followtype type) /* see transfer.h */
1504{
1505#ifdef CURL_DISABLE_HTTP
1506 (void)data;
1507 (void)newurl;
1508 (void)type;
1509 /* Location: following will not happen when HTTP is disabled */
1510 return CURLE_TOO_MANY_REDIRECTS;
1511#else
1512
1513 /* Location: redirect */
1514 bool disallowport = FALSE;
1515 bool reachedmax = FALSE;
1516 CURLUcode uc;
1517
1518 DEBUGASSERT(type != FOLLOW_NONE);
1519
1520 if(type != FOLLOW_FAKE)
1521 data->state.requests++; /* count all real follows */
1522 if(type == FOLLOW_REDIR) {
1523 if((data->set.maxredirs != -1) &&
1524 (data->state.followlocation >= data->set.maxredirs)) {
1525 reachedmax = TRUE;
1526 type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1527 to URL */
1528 }
1529 else {
1530 data->state.followlocation++; /* count redirect-followings, including
1531 auth reloads */
1532
1533 if(data->set.http_auto_referer) {
1534 CURLU *u;
1535 char *referer = NULL;
1536
1537 /* We are asked to automatically set the previous URL as the referer
1538 when we get the next URL. We pick the ->url field, which may or may
1539 not be 100% correct */
1540
1541 if(data->state.referer_alloc) {
1542 Curl_safefree(data->state.referer);
1543 data->state.referer_alloc = FALSE;
1544 }
1545
1546 /* Make a copy of the URL without credentials and fragment */
1547 u = curl_url();
1548 if(!u)
1549 return CURLE_OUT_OF_MEMORY;
1550
1551 uc = curl_url_set(handle: u, what: CURLUPART_URL, part: data->state.url, flags: 0);
1552 if(!uc)
1553 uc = curl_url_set(handle: u, what: CURLUPART_FRAGMENT, NULL, flags: 0);
1554 if(!uc)
1555 uc = curl_url_set(handle: u, what: CURLUPART_USER, NULL, flags: 0);
1556 if(!uc)
1557 uc = curl_url_set(handle: u, what: CURLUPART_PASSWORD, NULL, flags: 0);
1558 if(!uc)
1559 uc = curl_url_get(handle: u, what: CURLUPART_URL, part: &referer, flags: 0);
1560
1561 curl_url_cleanup(handle: u);
1562
1563 if(uc || !referer)
1564 return CURLE_OUT_OF_MEMORY;
1565
1566 data->state.referer = referer;
1567 data->state.referer_alloc = TRUE; /* yes, free this later */
1568 }
1569 }
1570 }
1571
1572 if((type != FOLLOW_RETRY) &&
1573 (data->req.httpcode != 401) && (data->req.httpcode != 407) &&
1574 Curl_is_absolute_url(url: newurl, NULL, buflen: 0, FALSE)) {
1575 /* If this is not redirect due to a 401 or 407 response and an absolute
1576 URL: don't allow a custom port number */
1577 disallowport = TRUE;
1578 }
1579
1580 DEBUGASSERT(data->state.uh);
1581 uc = curl_url_set(handle: data->state.uh, what: CURLUPART_URL, part: newurl,
1582 flags: (type == FOLLOW_FAKE) ? CURLU_NON_SUPPORT_SCHEME :
1583 ((type == FOLLOW_REDIR) ? CURLU_URLENCODE : 0) |
1584 CURLU_ALLOW_SPACE |
1585 (data->set.path_as_is ? CURLU_PATH_AS_IS : 0));
1586 if(uc) {
1587 if(type != FOLLOW_FAKE) {
1588 failf(data, fmt: "The redirect target URL could not be parsed: %s",
1589 curl_url_strerror(uc));
1590 return Curl_uc_to_curlcode(uc);
1591 }
1592
1593 /* the URL could not be parsed for some reason, but since this is FAKE
1594 mode, just duplicate the field as-is */
1595 newurl = strdup(newurl);
1596 if(!newurl)
1597 return CURLE_OUT_OF_MEMORY;
1598 }
1599 else {
1600 uc = curl_url_get(handle: data->state.uh, what: CURLUPART_URL, part: &newurl, flags: 0);
1601 if(uc)
1602 return Curl_uc_to_curlcode(uc);
1603
1604 /* Clear auth if this redirects to a different port number or protocol,
1605 unless permitted */
1606 if(!data->set.allow_auth_to_other_hosts && (type != FOLLOW_FAKE)) {
1607 char *portnum;
1608 int port;
1609 bool clear = FALSE;
1610
1611 if(data->set.use_port && data->state.allow_port)
1612 /* a custom port is used */
1613 port = (int)data->set.use_port;
1614 else {
1615 uc = curl_url_get(handle: data->state.uh, what: CURLUPART_PORT, part: &portnum,
1616 CURLU_DEFAULT_PORT);
1617 if(uc) {
1618 free(newurl);
1619 return Curl_uc_to_curlcode(uc);
1620 }
1621 port = atoi(nptr: portnum);
1622 free(portnum);
1623 }
1624 if(port != data->info.conn_remote_port) {
1625 infof(data, "Clear auth, redirects to port from %u to %u",
1626 data->info.conn_remote_port, port);
1627 clear = TRUE;
1628 }
1629 else {
1630 char *scheme;
1631 const struct Curl_handler *p;
1632 uc = curl_url_get(handle: data->state.uh, what: CURLUPART_SCHEME, part: &scheme, flags: 0);
1633 if(uc) {
1634 free(newurl);
1635 return Curl_uc_to_curlcode(uc);
1636 }
1637
1638 p = Curl_builtin_scheme(scheme, CURL_ZERO_TERMINATED);
1639 if(p && (p->protocol != data->info.conn_protocol)) {
1640 infof(data, "Clear auth, redirects scheme from %s to %s",
1641 data->info.conn_scheme, scheme);
1642 clear = TRUE;
1643 }
1644 free(scheme);
1645 }
1646 if(clear) {
1647 Curl_safefree(data->state.aptr.user);
1648 Curl_safefree(data->state.aptr.passwd);
1649 }
1650 }
1651 }
1652
1653 if(type == FOLLOW_FAKE) {
1654 /* we're only figuring out the new url if we would've followed locations
1655 but now we're done so we can get out! */
1656 data->info.wouldredirect = newurl;
1657
1658 if(reachedmax) {
1659 failf(data, fmt: "Maximum (%ld) redirects followed", data->set.maxredirs);
1660 return CURLE_TOO_MANY_REDIRECTS;
1661 }
1662 return CURLE_OK;
1663 }
1664
1665 if(disallowport)
1666 data->state.allow_port = FALSE;
1667
1668 if(data->state.url_alloc)
1669 Curl_safefree(data->state.url);
1670
1671 data->state.url = newurl;
1672 data->state.url_alloc = TRUE;
1673
1674 infof(data, "Issue another request to this URL: '%s'", data->state.url);
1675
1676 /*
1677 * We get here when the HTTP code is 300-399 (and 401). We need to perform
1678 * differently based on exactly what return code there was.
1679 *
1680 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1681 * an HTTP (proxy-) authentication scheme other than Basic.
1682 */
1683 switch(data->info.httpcode) {
1684 /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1685 Authorization: XXXX header in the HTTP request code snippet */
1686 /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1687 Proxy-Authorization: XXXX header in the HTTP request code snippet */
1688 /* 300 - Multiple Choices */
1689 /* 306 - Not used */
1690 /* 307 - Temporary Redirect */
1691 default: /* for all above (and the unknown ones) */
1692 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1693 * seem to be OK to POST to.
1694 */
1695 break;
1696 case 301: /* Moved Permanently */
1697 /* (quote from RFC7231, section 6.4.2)
1698 *
1699 * Note: For historical reasons, a user agent MAY change the request
1700 * method from POST to GET for the subsequent request. If this
1701 * behavior is undesired, the 307 (Temporary Redirect) status code
1702 * can be used instead.
1703 *
1704 * ----
1705 *
1706 * Many webservers expect this, so these servers often answers to a POST
1707 * request with an error page. To be sure that libcurl gets the page that
1708 * most user agents would get, libcurl has to force GET.
1709 *
1710 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1711 * can be overridden with CURLOPT_POSTREDIR.
1712 */
1713 if((data->state.httpreq == HTTPREQ_POST
1714 || data->state.httpreq == HTTPREQ_POST_FORM
1715 || data->state.httpreq == HTTPREQ_POST_MIME)
1716 && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1717 infof(data, "Switch from POST to GET");
1718 data->state.httpreq = HTTPREQ_GET;
1719 }
1720 break;
1721 case 302: /* Found */
1722 /* (quote from RFC7231, section 6.4.3)
1723 *
1724 * Note: For historical reasons, a user agent MAY change the request
1725 * method from POST to GET for the subsequent request. If this
1726 * behavior is undesired, the 307 (Temporary Redirect) status code
1727 * can be used instead.
1728 *
1729 * ----
1730 *
1731 * Many webservers expect this, so these servers often answers to a POST
1732 * request with an error page. To be sure that libcurl gets the page that
1733 * most user agents would get, libcurl has to force GET.
1734 *
1735 * This behavior is forbidden by RFC1945 and the obsolete RFC2616, and
1736 * can be overridden with CURLOPT_POSTREDIR.
1737 */
1738 if((data->state.httpreq == HTTPREQ_POST
1739 || data->state.httpreq == HTTPREQ_POST_FORM
1740 || data->state.httpreq == HTTPREQ_POST_MIME)
1741 && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1742 infof(data, "Switch from POST to GET");
1743 data->state.httpreq = HTTPREQ_GET;
1744 }
1745 break;

  case 303: /* See Other */
    /* 'See Other' location is not the resource but a substitute for the
     * resource. In this case we switch the method to GET/HEAD, unless the
     * method is POST and the user specified to keep it as POST.
     * https://github.com/curl/curl/issues/5237#issuecomment-614641049
     */
    if(data->state.httpreq != HTTPREQ_GET &&
       ((data->state.httpreq != HTTPREQ_POST &&
         data->state.httpreq != HTTPREQ_POST_FORM &&
         data->state.httpreq != HTTPREQ_POST_MIME) ||
        !(data->set.keep_post & CURL_REDIR_POST_303))) {
      data->state.httpreq = HTTPREQ_GET;
      infof(data, "Switch to %s",
            data->req.no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy. The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTransferSizes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}
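
/* Illustration (not part of the build): the redirect policy implemented
   above is driven by options the application sets on its easy handle. A
   minimal sketch, assuming a plain libcurl build; the handle name and URL
   are hypothetical:

     CURL *curl = curl_easy_init();
     if(curl) {
       curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/form");
       curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
       curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 5L);
       curl_easy_setopt(curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
       curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "name=daniel");
       curl_easy_perform(curl);
       curl_easy_cleanup(curl);
     }

   CURLOPT_MAXREDIRS feeds the maxredirs limit checked in Curl_follow() and
   CURLOPT_POSTREDIR sets the keep_post bits that suppress the POST-to-GET
   switch for 301/302/303 in the switch() above. */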

/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.

   NOTE: the returned *url is malloc()ed. */
CURLcode Curl_retry_request(struct Curl_easy *data, char **url)
{
  struct connectdata *conn = data->conn;
  bool retry = FALSE;
  *url = NULL;

  /* when uploading, we cannot do the checks below, unless the protocol is
     HTTP, since an upload over HTTP still gets a response */
  if(data->state.upload &&
     !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
    return CURLE_OK;

  if((data->req.bytecount + data->req.headerbytecount == 0) &&
     conn->bits.reuse &&
     (!data->req.no_body || (conn->handler->protocol & PROTO_FAMILY_HTTP))
#ifndef CURL_DISABLE_RTSP
     && (data->set.rtspreq != RTSPREQ_RECEIVE)
#endif
    )
    /* We got no data and we attempted to reuse a connection. For HTTP this
       can be a retry so we try again regardless of whether we expected a
       body. For other protocols we only try again if we expected a body.

       This might happen if the connection was left alive when we were
       done using it before, but that was closed when we wanted to read from
       it again. Bad luck. Retry the same request on a fresh connect! */
    retry = TRUE;
  else if(data->state.refused_stream &&
          (data->req.bytecount + data->req.headerbytecount == 0) ) {
    /* This was sent on a refused stream, safe to rerun. A refused stream
       error can typically only happen on HTTP/2 level if the stream is safe
       to issue again, but the nghttp2 API can deliver the message to other
       streams as well, which is why the data counters are checked here
       too. */
    infof(data, "REFUSED_STREAM, retrying a fresh connect");
    data->state.refused_stream = FALSE; /* clear again */
    retry = TRUE;
  }
  if(retry) {
#define CONN_MAX_RETRIES 5
    if(data->state.retrycount++ >= CONN_MAX_RETRIES) {
      failf(data, "Connection died, tried %d times before giving up",
            CONN_MAX_RETRIES);
      data->state.retrycount = 0;
      return CURLE_SEND_ERROR;
    }
    infof(data, "Connection died, retrying a fresh connect (retry count: %d)",
          data->state.retrycount);
    *url = strdup(data->state.url);
    if(!*url)
      return CURLE_OUT_OF_MEMORY;

    connclose(conn, "retry"); /* close this connection */
    conn->bits.retry = TRUE; /* mark this as a connection we're about
                                to retry. Marking it this way should
                                prevent e.g. HTTP transfers from returning
                                an error just because nothing has been
                                transferred! */

    if((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
       data->req.writebytecount) {
      data->state.rewindbeforesend = TRUE;
      infof(data, "state.rewindbeforesend = TRUE");
    }
  }
  return CURLE_OK;
}
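
/* Illustration (not part of the build): a sketch of how a caller inside
   libcurl typically consumes Curl_retry_request(); the real handling lives
   in the multi state machine and the local variable names here are
   hypothetical:

     char *newurl = NULL;
     CURLcode result = Curl_retry_request(data, &newurl);
     if(result)
       return result;
     if(newurl)
       result = Curl_follow(data, newurl, FOLLOW_RETRY);

   On success with a non-NULL newurl the same request is re-issued on a
   fresh connection; Curl_follow() takes over the malloc()ed URL (it is
   assigned to data->state.url there). */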

/*
 * Curl_setup_transfer() is called to set up some basic properties for the
 * upcoming transfer.
 */
void
Curl_setup_transfer(
  struct Curl_easy *data,   /* transfer */
  int sockindex,            /* socket index to read from or -1 */
  curl_off_t size,          /* -1 if unknown at this point */
  bool getheader,           /* TRUE if header parsing is wanted */
  int writesockindex        /* socket index to write to, it may very well be
                               the same we read from. -1 disables */
  )
{
  struct SingleRequest *k = &data->req;
  struct connectdata *conn = data->conn;
  struct HTTP *http = data->req.p.http;
  bool httpsending;

  DEBUGASSERT(conn != NULL);
  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));

  httpsending = ((conn->handler->protocol&PROTO_FAMILY_HTTP) &&
                 (http->sending == HTTPSEND_REQUEST));

  if(conn->bits.multiplex || conn->httpversion >= 20 || httpsending) {
    /* when multiplexing, the read/write sockets need to be the same! */
    conn->sockfd = sockindex == -1 ?
      ((writesockindex == -1 ? CURL_SOCKET_BAD : conn->sock[writesockindex])) :
      conn->sock[sockindex];
    conn->writesockfd = conn->sockfd;
    if(httpsending)
      /* special and very HTTP-specific */
      writesockindex = FIRSTSOCKET;
  }
  else {
    conn->sockfd = sockindex == -1 ?
      CURL_SOCKET_BAD : conn->sock[sockindex];
    conn->writesockfd = writesockindex == -1 ?
      CURL_SOCKET_BAD:conn->sock[writesockindex];
  }
  k->getheader = getheader;

  k->size = size;

  /* The code sequence below is placed in this function just because all
     necessary input is not always known in do_complete() as this function may
     be called after that */

  if(!k->getheader) {
    k->header = FALSE;
    if(size > 0)
      Curl_pgrsSetDownloadSize(data, size);
  }
  /* we want header and/or body, if neither then don't do this! */
  if(k->getheader || !data->req.no_body) {

    if(sockindex != -1)
      k->keepon |= KEEP_RECV;

    if(writesockindex != -1) {
      /* HTTP 1.1 magic:

         Even if we require a 100-continue response before uploading data, we
         might need to write data before that since the REQUEST may not have
         been completely sent off just yet.

         Thus, we must check if the request has been sent before we set the
         state info where we wait for the 100-continue response.
      */
      if((data->state.expect100header) &&
         (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
         (http->sending == HTTPSEND_BODY)) {
        /* wait with write until we either got 100-continue or a timeout */
        k->exp100 = EXP100_AWAITING_CONTINUE;
        k->start100 = Curl_now();

        /* Set a timeout for the multi interface. Add the inaccuracy margin so
           that we don't fire slightly too early and get denied the chance to
           run. */
        Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
      }
      else {
        if(data->state.expect100header)
          /* when we've sent off the rest of the headers, we must await a
             100-continue but first finish sending the request */
          k->exp100 = EXP100_SENDING_REQUEST;

        /* enable the write bit when we're not waiting for continue */
        k->keepon |= KEEP_SEND;
      }
    } /* if(writesockindex != -1) */
  } /* if(k->getheader || !data->req.no_body) */

}

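/* Illustration (not part of the build): protocol handlers call
   Curl_setup_transfer() once they know what the upcoming transfer looks
   like. The exact values each protocol passes live in its own handler;
   these two sketches are hypothetical:

     Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
       - read the response (headers plus body) from the first socket, total
         size unknown, no upload socket

     Curl_setup_transfer(data, FIRSTSOCKET, -1, FALSE, FIRSTSOCKET);
       - send and receive on the same socket without header parsing, as a
         non-HTTP protocol might do

   The expect100header handling above only kicks in for the HTTP family,
   once the request body is about to be sent. */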