2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
24 * Changes: Pedro Roque : Retransmit queue handled by TCP.
25 * : Fragmentation on mtu decrease
26 * : Segment collapse on retransmit
29 * Linus Torvalds : send_delayed_ack
30 * David S. Miller : Charge memory using the right skb
31 * during syn/ack processing.
32 * David S. Miller : Output engine completely rewritten.
33 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr.
34 * Cacophonix Gaul : draft-minshall-nagle-01
35 * J Hadi Salim : ECN support
41 #include <linux/compiler.h>
42 #include <linux/module.h>
44 /* People can turn this off for buggy TCPs found in printers etc. */
45 int sysctl_tcp_retrans_collapse __read_mostly = 1;
47 /* People can turn this on to work with those rare, broken TCPs that
48 * interpret the window field as a signed quantity.
50 int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
52 /* This limits the percentage of the congestion window which we
53 * will allow a single TSO frame to consume. Building TSO frames
54 * which are too large can cause TCP streams to be bursty.
56 int sysctl_tcp_tso_win_divisor __read_mostly = 3;
58 int sysctl_tcp_mtu_probing __read_mostly = 0;
59 int sysctl_tcp_base_mss __read_mostly = 512;
61 /* By default, RFC2861 behavior. */
62 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
64 static void update_send_head(struct sock *sk, struct sk_buff *skb)
66 struct tcp_sock *tp = tcp_sk(sk);
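/* skb has just been transmitted: advance the send head past it, record the
 * next sequence to send, and count it among the packets in flight. */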
68 tcp_advance_send_head(sk, skb);
69 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
70 tcp_packets_out_inc(sk, skb);
73 /* SND.NXT, if window was not shrunk.
74 * If the window has been shrunk, what should we send? It is not clear at all.
75 * Using SND.UNA we will fail to open the window, SND.NXT is out of window. :-(
76 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
77 * invalid. OK, let's settle on this for now:
79 static inline __u32 tcp_acceptable_seq(struct sock *sk)
81 struct tcp_sock *tp = tcp_sk(sk);
83 if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
86 return tp->snd_una+tp->snd_wnd;
89 /* Calculate mss to advertise in SYN segment.
90 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
92 * 1. It is independent of path mtu.
93 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
94 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
95 * attached devices, because some buggy hosts are confused by
97 * 4. We do not do 3; we advertise an MSS calculated from the first
98 * hop device mtu, but allow it to be raised to ip_rt_min_advmss.
99 * This may be overridden via information stored in routing table.
100 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
101 * probably even Jumbo".
103 static __u16 tcp_advertise_mss(struct sock *sk)
105 struct tcp_sock *tp = tcp_sk(sk);
106 struct dst_entry *dst = __sk_dst_get(sk);
107 int mss = tp->advmss;
109 if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
110 mss = dst_metric(dst, RTAX_ADVMSS);
117 /* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
118 * This is the first part of the cwnd validation mechanism. */
119 static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
121 struct tcp_sock *tp = tcp_sk(sk);
122 s32 delta = tcp_time_stamp - tp->lsndtime;
123 u32 restart_cwnd = tcp_init_cwnd(tp, dst);
124 u32 cwnd = tp->snd_cwnd;
126 tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
128 tp->snd_ssthresh = tcp_current_ssthresh(sk);
129 restart_cwnd = min(restart_cwnd, cwnd);
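	/* For each full RTO the connection sat idle, decay cwnd, but never
	 * drop below the restart window. */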
131 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
133 tp->snd_cwnd = max(cwnd, restart_cwnd);
134 tp->snd_cwnd_stamp = tcp_time_stamp;
135 tp->snd_cwnd_used = 0;
138 static void tcp_event_data_sent(struct tcp_sock *tp,
139 struct sk_buff *skb, struct sock *sk)
141 struct inet_connection_sock *icsk = inet_csk(sk);
142 const u32 now = tcp_time_stamp;
144 if (sysctl_tcp_slow_start_after_idle &&
145 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
146 tcp_cwnd_restart(sk, __sk_dst_get(sk));
150 /* If this is a reply within ATO of the last received
151 * packet, enter pingpong (delayed ACK) mode.
153 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
154 icsk->icsk_ack.pingpong = 1;
157 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
159 tcp_dec_quickack_mode(sk, pkts);
160 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
163 /* Determine a window scaling and initial window to offer.
164 * Based on the assumption that the given amount of space
165 * will be offered. Store the results in the tp structure.
166 * NOTE: for smooth operation initial space offering should
167 * be a multiple of mss if possible. We assume here that mss >= 1.
168 * This MUST be enforced by all callers.
170 void tcp_select_initial_window(int __space, __u32 mss,
171 __u32 *rcv_wnd, __u32 *window_clamp,
172 int wscale_ok, __u8 *rcv_wscale)
174 unsigned int space = (__space < 0 ? 0 : __space);
176 /* If no clamp set the clamp to the max possible scaled window */
177 if (*window_clamp == 0)
178 (*window_clamp) = (65535 << 14);
179 space = min(*window_clamp, space);
181 /* Quantize space offering to a multiple of mss if possible. */
183 space = (space / mss) * mss;
185 /* NOTE: offering an initial window larger than 32767
186 * will break some buggy TCP stacks. If the admin tells us
187 * it is likely we could be speaking with such a buggy stack
188 * we will truncate our initial window offering to 32K-1
189 * unless the remote has sent us a window scaling option,
190 * which we interpret as a sign the remote TCP is not
191 * misinterpreting the window field as a signed quantity.
193 if (sysctl_tcp_workaround_signed_windows)
194 (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
200 /* Set window scaling on max possible window
201 * See RFC1323 for an explanation of the limit to 14
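 * For instance, a 4 MB receive buffer limit needs rcv_wscale = 7,
 * since 65535 << 6 < 4 MB <= 65535 << 7.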
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp);
205 while (space > 65535 && (*rcv_wscale) < 14) {
211 /* Set the initial window to a value large enough for senders
212 * following RFC2414. Senders not following this RFC
213 * will be satisfied with 2.
215 if (mss > (1<<*rcv_wscale)) {
221 if (*rcv_wnd > init_cwnd*mss)
222 *rcv_wnd = init_cwnd*mss;
225 /* Set the clamp no higher than max representable value */
226 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
229 /* Choose a new window to advertise, update state in tcp_sock for the
230 * socket, and return result with RFC1323 scaling applied. The return
231 * value can be stuffed directly into th->window for an outgoing
234 static u16 tcp_select_window(struct sock *sk)
236 struct tcp_sock *tp = tcp_sk(sk);
237 u32 cur_win = tcp_receive_window(tp);
238 u32 new_win = __tcp_select_window(sk);
240 /* Never shrink the offered window */
241 if (new_win < cur_win) {
242 /* Danger Will Robinson!
243 * Don't update rcv_wup/rcv_wnd here or else
244 * we will not be able to advertise a zero
245 * window in time. --DaveM
247 * Relax Will Robinson.
251 tp->rcv_wnd = new_win;
252 tp->rcv_wup = tp->rcv_nxt;
254 /* Make sure we do not exceed the maximum possible
257 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
258 new_win = min(new_win, MAX_TCP_WINDOW);
260 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
262 /* RFC1323 scaling applied */
263 new_win >>= tp->rx_opt.rcv_wscale;
265 /* If we advertise zero window, disable fast path. */
272 static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
275 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
276 if (!(tp->ecn_flags&TCP_ECN_OK))
277 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
280 static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
282 struct tcp_sock *tp = tcp_sk(sk);
285 if (sysctl_tcp_ecn) {
286 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
287 tp->ecn_flags = TCP_ECN_OK;
291 static __inline__ void
292 TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
294 if (inet_rsk(req)->ecn_ok)
298 static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
301 struct tcp_sock *tp = tcp_sk(sk);
303 if (tp->ecn_flags & TCP_ECN_OK) {
304 /* Not-retransmitted data segment: set ECT and inject CWR. */
305 if (skb->len != tcp_header_len &&
306 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
308 if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
309 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
310 tcp_hdr(skb)->cwr = 1;
311 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
314 /* ACK or retransmitted segment: clear ECT|CE */
315 INET_ECN_dontxmit(sk);
317 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
318 tcp_hdr(skb)->ece = 1;
322 static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
323 __u32 tstamp, __u8 **md5_hash)
325 if (tp->rx_opt.tstamp_ok) {
326 *ptr++ = htonl((TCPOPT_NOP << 24) |
328 (TCPOPT_TIMESTAMP << 8) |
330 *ptr++ = htonl(tstamp);
331 *ptr++ = htonl(tp->rx_opt.ts_recent);
333 if (tp->rx_opt.eff_sacks) {
334 struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
337 *ptr++ = htonl((TCPOPT_NOP << 24) |
340 (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
341 TCPOLEN_SACK_PERBLOCK)));
343 for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
344 *ptr++ = htonl(sp[this_sack].start_seq);
345 *ptr++ = htonl(sp[this_sack].end_seq);
348 if (tp->rx_opt.dsack) {
349 tp->rx_opt.dsack = 0;
350 tp->rx_opt.eff_sacks--;
353 #ifdef CONFIG_TCP_MD5SIG
355 *ptr++ = htonl((TCPOPT_NOP << 24) |
357 (TCPOPT_MD5SIG << 8) |
359 *md5_hash = (__u8 *)ptr;
364 /* Construct a tcp options header for a SYN or SYN_ACK packet.
365 * If this is ever changed, make sure to change the definition of
366 * MAX_SYN_SIZE to match the new maximum number of options that you
369 * Note - that with the RFC2385 TCP option, we make room for the
370 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
371 * location to be filled is passed back up.
373 static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
374 int offer_wscale, int wscale, __u32 tstamp,
375 __u32 ts_recent, __u8 **md5_hash)
377 /* We always get an MSS option.
378 * The option bytes which will be seen in normal data
379 * packets (should timestamps be used) must be counted in the MSS
380 * advertised. But we subtract them from tp->mss_cache so
381 * that calculations in tcp_sendmsg are simpler etc.
382 * So account for this fact here if necessary. If we
383 * don't do this correctly, as a receiver we won't
384 * recognize data packets as being full sized when we
385 * should, and thus we won't abide by the delayed ACK
387 * SACKs don't matter, we never delay an ACK when we
388 * have any of those going out.
390 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
393 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
394 (TCPOLEN_SACK_PERM << 16) |
395 (TCPOPT_TIMESTAMP << 8) |
398 *ptr++ = htonl((TCPOPT_NOP << 24) |
400 (TCPOPT_TIMESTAMP << 8) |
402 *ptr++ = htonl(tstamp); /* TSVAL */
403 *ptr++ = htonl(ts_recent); /* TSECR */
405 *ptr++ = htonl((TCPOPT_NOP << 24) |
407 (TCPOPT_SACK_PERM << 8) |
410 *ptr++ = htonl((TCPOPT_NOP << 24) |
411 (TCPOPT_WINDOW << 16) |
412 (TCPOLEN_WINDOW << 8) |
414 #ifdef CONFIG_TCP_MD5SIG
416 * If MD5 is enabled, then we set the option, and include the size
417 * (always 18). The actual MD5 hash is added just before the
421 *ptr++ = htonl((TCPOPT_NOP << 24) |
423 (TCPOPT_MD5SIG << 8) |
425 *md5_hash = (__u8 *) ptr;
430 /* This routine actually transmits TCP packets queued in by
431 * tcp_do_sendmsg(). This is used by both the initial
432 * transmission and possible later retransmissions.
433 * All SKB's seen here are completely headerless. It is our
434 * job to build the TCP header, and pass the packet down to
435 * IP so it can do the same plus pass the packet off to the
438 * We are working here with either a clone of the original
439 * SKB, or a fresh unique copy made by the retransmit engine.
441 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
443 const struct inet_connection_sock *icsk = inet_csk(sk);
444 struct inet_sock *inet;
446 struct tcp_skb_cb *tcb;
448 #ifdef CONFIG_TCP_MD5SIG
449 struct tcp_md5sig_key *md5;
450 __u8 *md5_hash_location;
456 BUG_ON(!skb || !tcp_skb_pcount(skb));
458 /* If congestion control is doing timestamping, we must
459 * take such a timestamp before we potentially clone/copy.
461 if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
462 __net_timestamp(skb);
464 if (likely(clone_it)) {
465 if (unlikely(skb_cloned(skb)))
466 skb = pskb_copy(skb, gfp_mask);
468 skb = skb_clone(skb, gfp_mask);
475 tcb = TCP_SKB_CB(skb);
476 tcp_header_size = tp->tcp_header_len;
478 #define SYSCTL_FLAG_TSTAMPS 0x1
479 #define SYSCTL_FLAG_WSCALE 0x2
480 #define SYSCTL_FLAG_SACK 0x4
483 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
484 tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
485 if (sysctl_tcp_timestamps) {
486 tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
487 sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
489 if (sysctl_tcp_window_scaling) {
490 tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
491 sysctl_flags |= SYSCTL_FLAG_WSCALE;
493 if (sysctl_tcp_sack) {
494 sysctl_flags |= SYSCTL_FLAG_SACK;
495 if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
496 tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
498 } else if (unlikely(tp->rx_opt.eff_sacks)) {
499 /* A SACK is 2 pad bytes, a 2 byte header, plus
500 * 2 32-bit sequence numbers for each SACK block.
502 tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
503 (tp->rx_opt.eff_sacks *
504 TCPOLEN_SACK_PERBLOCK));
507 if (tcp_packets_in_flight(tp) == 0)
508 tcp_ca_event(sk, CA_EVENT_TX_START);
510 #ifdef CONFIG_TCP_MD5SIG
512 * Are we doing MD5 on this segment? If so - make
515 md5 = tp->af_specific->md5_lookup(sk, sk);
517 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
520 skb_push(skb, tcp_header_size);
521 skb_reset_transport_header(skb);
522 skb_set_owner_w(skb, sk);
524 /* Build TCP header and checksum it. */
526 th->source = inet->sport;
527 th->dest = inet->dport;
528 th->seq = htonl(tcb->seq);
529 th->ack_seq = htonl(tp->rcv_nxt);
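	/* Bytes 12-13 of the TCP header: the 4-bit data offset (header length
	 * in 32-bit words) packed with the flag bits in a single 16-bit store. */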
530 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
533 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
534 /* RFC1323: The window in SYN & SYN/ACK segments
537 th->window = htons(min(tp->rcv_wnd, 65535U));
539 th->window = htons(tcp_select_window(sk));
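	/* Only point URG at data that lies within 2^16 - 1 bytes of this
	 * segment's sequence number; the 16-bit urgent pointer cannot
	 * express anything further out. */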
544 if (unlikely(tp->urg_mode &&
545 between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
546 th->urg_ptr = htons(tp->snd_up-tcb->seq);
550 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
551 tcp_syn_build_options((__be32 *)(th + 1),
552 tcp_advertise_mss(sk),
553 (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
554 (sysctl_flags & SYSCTL_FLAG_SACK),
555 (sysctl_flags & SYSCTL_FLAG_WSCALE),
556 tp->rx_opt.rcv_wscale,
558 tp->rx_opt.ts_recent,
560 #ifdef CONFIG_TCP_MD5SIG
561 md5 ? &md5_hash_location :
565 tcp_build_and_update_options((__be32 *)(th + 1),
567 #ifdef CONFIG_TCP_MD5SIG
568 md5 ? &md5_hash_location :
571 TCP_ECN_send(sk, skb, tcp_header_size);
574 #ifdef CONFIG_TCP_MD5SIG
575 /* Calculate the MD5 hash, as we have all we need now */
577 tp->af_specific->calc_md5_hash(md5_hash_location,
586 icsk->icsk_af_ops->send_check(sk, skb->len, skb);
588 if (likely(tcb->flags & TCPCB_FLAG_ACK))
589 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
591 if (skb->len != tcp_header_size)
592 tcp_event_data_sent(tp, skb, sk);
594 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
595 TCP_INC_STATS(TCP_MIB_OUTSEGS);
597 err = icsk->icsk_af_ops->queue_xmit(skb, 0);
598 if (likely(err <= 0))
601 tcp_enter_cwr(sk, 1);
603 return net_xmit_eval(err);
605 #undef SYSCTL_FLAG_TSTAMPS
606 #undef SYSCTL_FLAG_WSCALE
607 #undef SYSCTL_FLAG_SACK
611 /* This routine just queues the buffer
613 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
614 * otherwise socket can stall.
616 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
618 struct tcp_sock *tp = tcp_sk(sk);
620 /* Advance write_seq and place onto the write_queue. */
621 tp->write_seq = TCP_SKB_CB(skb)->end_seq;
622 skb_header_release(skb);
623 tcp_add_write_queue_tail(sk, skb);
624 sk_charge_skb(sk, skb);
627 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
629 if (skb->len <= mss_now || !sk_can_gso(sk)) {
630 /* Avoid the costly divide in the normal
633 skb_shinfo(skb)->gso_segs = 1;
634 skb_shinfo(skb)->gso_size = 0;
635 skb_shinfo(skb)->gso_type = 0;
639 factor = skb->len + (mss_now - 1);
641 skb_shinfo(skb)->gso_segs = factor;
642 skb_shinfo(skb)->gso_size = mss_now;
643 skb_shinfo(skb)->gso_type = sk->sk_gso_type;
647 /* Function to create two new TCP segments. Shrinks the given segment
648 * to the specified size and appends a new segment with the rest of the
649 * packet to the list. This won't be called frequently, I hope.
650 * Remember, these are still headerless SKBs at this point.
652 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
654 struct tcp_sock *tp = tcp_sk(sk);
655 struct sk_buff *buff;
656 int nsize, old_factor;
660 BUG_ON(len > skb->len);
662 clear_all_retrans_hints(tp);
663 nsize = skb_headlen(skb) - len;
667 if (skb_cloned(skb) &&
668 skb_is_nonlinear(skb) &&
669 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
672 /* Get a new skb... force flag on. */
673 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
675 return -ENOMEM; /* We'll just try again later. */
677 sk_charge_skb(sk, buff);
678 nlen = skb->len - len - nsize;
679 buff->truesize += nlen;
680 skb->truesize -= nlen;
682 /* Correct the sequence numbers. */
683 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
684 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
685 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
687 /* PSH and FIN should only be set in the second packet. */
688 flags = TCP_SKB_CB(skb)->flags;
689 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
690 TCP_SKB_CB(buff)->flags = flags;
691 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
692 TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
694 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
695 /* Copy and checksum data tail into the new buffer. */
696 buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
701 skb->csum = csum_block_sub(skb->csum, buff->csum, len);
703 skb->ip_summed = CHECKSUM_PARTIAL;
704 skb_split(skb, buff, len);
707 buff->ip_summed = skb->ip_summed;
709 /* Looks stupid, but our code really uses the 'when' field of
710 * skbs which it has never sent before. --ANK
712 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
713 buff->tstamp = skb->tstamp;
715 old_factor = tcp_skb_pcount(skb);
717 /* Fix up tso_factor for both original and new SKB. */
718 tcp_set_skb_tso_segs(sk, skb, mss_now);
719 tcp_set_skb_tso_segs(sk, buff, mss_now);
721 /* If this packet has been sent out already, we must
722 * adjust the various packet counters.
724 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
725 int diff = old_factor - tcp_skb_pcount(skb) -
726 tcp_skb_pcount(buff);
728 tp->packets_out -= diff;
730 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
731 tp->sacked_out -= diff;
732 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
733 tp->retrans_out -= diff;
735 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
736 tp->lost_out -= diff;
737 tp->left_out -= diff;
741 /* Adjust Reno SACK estimate. */
742 if (!tp->rx_opt.sack_ok) {
743 tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
744 tcp_sync_left_out(tp);
747 tcp_dec_pcount_approx_int(&tp->fackets_out, diff);
748 /* SACK fastpath might overwrite it unless dealt with */
749 if (tp->fastpath_skb_hint != NULL &&
750 after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq,
751 TCP_SKB_CB(skb)->seq)) {
752 tcp_dec_pcount_approx_int(&tp->fastpath_cnt_hint, diff);
757 /* Link BUFF into the send queue. */
758 skb_header_release(buff);
759 tcp_insert_write_queue_after(skb, buff, sk);
764 /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
765 * eventually). The difference is that the pulled data is not copied, but
766 * immediately discarded.
768 static void __pskb_trim_head(struct sk_buff *skb, int len)
774 for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
775 if (skb_shinfo(skb)->frags[i].size <= eat) {
776 put_page(skb_shinfo(skb)->frags[i].page);
777 eat -= skb_shinfo(skb)->frags[i].size;
779 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
781 skb_shinfo(skb)->frags[k].page_offset += eat;
782 skb_shinfo(skb)->frags[k].size -= eat;
788 skb_shinfo(skb)->nr_frags = k;
790 skb_reset_tail_pointer(skb);
791 skb->data_len -= len;
792 skb->len = skb->data_len;
795 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
797 if (skb_cloned(skb) &&
798 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
801 /* If len == headlen, we avoid __skb_pull to preserve alignment. */
802 if (unlikely(len < skb_headlen(skb)))
803 __skb_pull(skb, len);
805 __pskb_trim_head(skb, len - skb_headlen(skb));
807 TCP_SKB_CB(skb)->seq += len;
808 skb->ip_summed = CHECKSUM_PARTIAL;
810 skb->truesize -= len;
811 sk->sk_wmem_queued -= len;
812 sk->sk_forward_alloc += len;
813 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
815 /* Any change of skb->len requires recalculation of tso
818 if (tcp_skb_pcount(skb) > 1)
819 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
824 /* Not accounting for SACKs here. */
825 int tcp_mtu_to_mss(struct sock *sk, int pmtu)
827 struct tcp_sock *tp = tcp_sk(sk);
828 struct inet_connection_sock *icsk = inet_csk(sk);
831 /* Calculate base mss without TCP options:
832 It is MMS_S - sizeof(tcphdr) of rfc1122
834 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
836 /* Clamp it (mss_clamp does not include tcp options) */
837 if (mss_now > tp->rx_opt.mss_clamp)
838 mss_now = tp->rx_opt.mss_clamp;
840 /* Now subtract optional transport overhead */
841 mss_now -= icsk->icsk_ext_hdr_len;
843 /* Then reserve room for full set of TCP options and 8 bytes of data */
847 /* Now subtract TCP options size, not including SACKs */
848 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
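	/* For instance, plain IPv4 with a 1500-byte MTU and no extension
	 * headers: 1500 - 20 (IP) - 20 (TCP) = 1460, minus another 12 bytes
	 * here if timestamps were negotiated. */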
853 /* Inverse of above */
854 int tcp_mss_to_mtu(struct sock *sk, int mss)
856 struct tcp_sock *tp = tcp_sk(sk);
857 struct inet_connection_sock *icsk = inet_csk(sk);
862 icsk->icsk_ext_hdr_len +
863 icsk->icsk_af_ops->net_header_len;
868 void tcp_mtup_init(struct sock *sk)
870 struct tcp_sock *tp = tcp_sk(sk);
871 struct inet_connection_sock *icsk = inet_csk(sk);
873 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
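	/* Probing searches MTUs between search_low (derived from
	 * sysctl_tcp_base_mss) and search_high (derived from the MSS clamp). */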
874 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
875 icsk->icsk_af_ops->net_header_len;
876 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
877 icsk->icsk_mtup.probe_size = 0;
880 /* This function synchronizes snd mss to the current pmtu/exthdr set.
882 tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT account
883 for TCP options, but assumes only the bare TCP header.
885 tp->rx_opt.mss_clamp is mss negotiated at connection setup.
886 It is minimum of user_mss and mss received with SYN.
887 It also does not include TCP options.
889 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
891 tp->mss_cache is current effective sending mss, including
892 all tcp options except for SACKs. It is evaluated,
893 taking into account current pmtu, but never exceeds
894 tp->rx_opt.mss_clamp.
896 NOTE1. rfc1122 clearly states that advertised MSS
897 DOES NOT include either tcp or ip options.
899 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
900 are READ ONLY outside this function. --ANK (980731)
903 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
905 struct tcp_sock *tp = tcp_sk(sk);
906 struct inet_connection_sock *icsk = inet_csk(sk);
909 if (icsk->icsk_mtup.search_high > pmtu)
910 icsk->icsk_mtup.search_high = pmtu;
912 mss_now = tcp_mtu_to_mss(sk, pmtu);
914 /* Bound mss with half of window */
915 if (tp->max_window && mss_now > (tp->max_window>>1))
916 mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
918 /* And store cached results */
919 icsk->icsk_pmtu_cookie = pmtu;
920 if (icsk->icsk_mtup.enabled)
921 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
922 tp->mss_cache = mss_now;
927 /* Compute the current effective MSS, taking SACKs and IP options,
928 * and even PMTU discovery events into account.
930 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
931 * cannot be large. However, taking into account rare use of URG, this
934 unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
936 struct tcp_sock *tp = tcp_sk(sk);
937 struct dst_entry *dst = __sk_dst_get(sk);
942 mss_now = tp->mss_cache;
944 if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
948 u32 mtu = dst_mtu(dst);
949 if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
950 mss_now = tcp_sync_mss(sk, mtu);
953 if (tp->rx_opt.eff_sacks)
954 mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
955 (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
957 #ifdef CONFIG_TCP_MD5SIG
958 if (tp->af_specific->md5_lookup(sk, sk))
959 mss_now -= TCPOLEN_MD5SIG_ALIGNED;
962 xmit_size_goal = mss_now;
965 xmit_size_goal = (65535 -
966 inet_csk(sk)->icsk_af_ops->net_header_len -
967 inet_csk(sk)->icsk_ext_hdr_len -
970 if (tp->max_window &&
971 (xmit_size_goal > (tp->max_window >> 1)))
972 xmit_size_goal = max((tp->max_window >> 1),
973 68U - tp->tcp_header_len);
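	/* Keep the size goal an exact multiple of mss_now so TSO frames
	 * carry whole segments. */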
975 xmit_size_goal -= (xmit_size_goal % mss_now);
977 tp->xmit_size_goal = xmit_size_goal;
982 /* Congestion window validation. (RFC2861) */
984 static void tcp_cwnd_validate(struct sock *sk)
986 struct tcp_sock *tp = tcp_sk(sk);
987 __u32 packets_out = tp->packets_out;
989 if (packets_out >= tp->snd_cwnd) {
990 /* Network is fed fully. */
991 tp->snd_cwnd_used = 0;
992 tp->snd_cwnd_stamp = tcp_time_stamp;
994 /* Network starves. */
995 if (tp->packets_out > tp->snd_cwnd_used)
996 tp->snd_cwnd_used = tp->packets_out;
998 if (sysctl_tcp_slow_start_after_idle &&
999 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1000 tcp_cwnd_application_limited(sk);
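/* Return how many payload bytes the send window and the congestion window
 * jointly allow for this skb. */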
1004 static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
1006 u32 window, cwnd_len;
1008 window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
1009 cwnd_len = mss_now * cwnd;
1010 return min(window, cwnd_len);
1013 /* Can at least one segment of SKB be sent right now, according to the
1014 * congestion window rules? If so, return how many segments are allowed.
1016 static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
1018 u32 in_flight, cwnd;
1020 /* Don't be strict about the congestion window for the final FIN. */
1021 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1022 tcp_skb_pcount(skb) == 1)
1025 in_flight = tcp_packets_in_flight(tp);
1026 cwnd = tp->snd_cwnd;
1027 if (in_flight < cwnd)
1028 return (cwnd - in_flight);
1033 /* This must be invoked the first time we consider transmitting
1034 * SKB onto the wire.
1036 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
1038 int tso_segs = tcp_skb_pcount(skb);
1042 tcp_skb_mss(skb) != mss_now)) {
1043 tcp_set_skb_tso_segs(sk, skb, mss_now);
1044 tso_segs = tcp_skb_pcount(skb);
1049 static inline int tcp_minshall_check(const struct tcp_sock *tp)
1051 return after(tp->snd_sml,tp->snd_una) &&
1052 !after(tp->snd_sml, tp->snd_nxt);
1055 /* Return 0 if the packet can be sent now without violating Nagle's rules:
1056 * 1. It is full sized.
1057 * 2. Or it contains FIN. (already checked by caller)
1058 * 3. Or TCP_NODELAY was set.
1059 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1060 * With Minshall's modification: all sent small packets are ACKed.
1063 static inline int tcp_nagle_check(const struct tcp_sock *tp,
1064 const struct sk_buff *skb,
1065 unsigned mss_now, int nonagle)
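	/* Nonzero means hold this sub-MSS segment back: either TCP_CORK is set,
	 * or plain Nagle applies and small unacked data is still in flight. */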
1067 return (skb->len < mss_now &&
1068 ((nonagle&TCP_NAGLE_CORK) ||
1071 tcp_minshall_check(tp))));
1074 /* Return non-zero if the Nagle test allows this packet to be
1077 static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1078 unsigned int cur_mss, int nonagle)
1080 /* The Nagle rule does not apply to frames which sit in the middle of the
1081 * write_queue (they have no chance to get new data).
1083 * This is implemented in the callers, where they modify the 'nonagle'
1084 * argument based upon the location of SKB in the send queue.
1086 if (nonagle & TCP_NAGLE_PUSH)
1089 /* Don't use the nagle rule for urgent data (or for the final FIN).
1090 * Nagle can be ignored during F-RTO too (see RFC4138).
1092 if (tp->urg_mode || (tp->frto_counter == 2) ||
1093 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1096 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1102 /* Does at least the first segment of SKB fit into the send window? */
1103 static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
1105 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1107 if (skb->len > cur_mss)
1108 end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1110 return !after(end_seq, tp->snd_una + tp->snd_wnd);
1113 /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1114 * should be put on the wire right now. If so, it returns the number of
1115 * packets allowed by the congestion window.
1117 static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1118 unsigned int cur_mss, int nonagle)
1120 struct tcp_sock *tp = tcp_sk(sk);
1121 unsigned int cwnd_quota;
1123 tcp_init_tso_segs(sk, skb, cur_mss);
1125 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1128 cwnd_quota = tcp_cwnd_test(tp, skb);
1130 !tcp_snd_wnd_test(tp, skb, cur_mss))
1136 int tcp_may_send_now(struct sock *sk)
1138 struct tcp_sock *tp = tcp_sk(sk);
1139 struct sk_buff *skb = tcp_send_head(sk);
1142 tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1143 (tcp_skb_is_last(sk, skb) ?
1148 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1149 * which is put after SKB on the list. It is very much like
1150 * tcp_fragment() except that it may make several kinds of assumptions
1151 * in order to speed up the splitting operation. In particular, we
1152 * know that all the data is in scatter-gather pages, and that the
1153 * packet has never been sent out before (and thus is not cloned).
1155 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1157 struct sk_buff *buff;
1158 int nlen = skb->len - len;
1161 /* All of a TSO frame must be composed of paged data. */
1162 if (skb->len != skb->data_len)
1163 return tcp_fragment(sk, skb, len, mss_now);
1165 buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
1166 if (unlikely(buff == NULL))
1169 sk_charge_skb(sk, buff);
1170 buff->truesize += nlen;
1171 skb->truesize -= nlen;
1173 /* Correct the sequence numbers. */
1174 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1175 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1176 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1178 /* PSH and FIN should only be set in the second packet. */
1179 flags = TCP_SKB_CB(skb)->flags;
1180 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1181 TCP_SKB_CB(buff)->flags = flags;
1183 /* This packet was never sent out yet, so no SACK bits. */
1184 TCP_SKB_CB(buff)->sacked = 0;
1186 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1187 skb_split(skb, buff, len);
1189 /* Fix up tso_factor for both original and new SKB. */
1190 tcp_set_skb_tso_segs(sk, skb, mss_now);
1191 tcp_set_skb_tso_segs(sk, buff, mss_now);
1193 /* Link BUFF into the send queue. */
1194 skb_header_release(buff);
1195 tcp_insert_write_queue_after(skb, buff, sk);
1200 /* Try to defer sending, if possible, in order to minimize the amount
1201 * of TSO splitting we do. View it as a kind of TSO Nagle test.
1203 * This algorithm is from John Heffner.
1205 static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1207 struct tcp_sock *tp = tcp_sk(sk);
1208 const struct inet_connection_sock *icsk = inet_csk(sk);
1209 u32 send_win, cong_win, limit, in_flight;
1211 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1214 if (icsk->icsk_ca_state != TCP_CA_Open)
1217 /* Defer for less than two clock ticks. */
1218 if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1)
1221 in_flight = tcp_packets_in_flight(tp);
1223 BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1224 (tp->snd_cwnd <= in_flight));
1226 send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1228 /* From in_flight test above, we know that cwnd > in_flight. */
1229 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1231 limit = min(send_win, cong_win);
1233 /* If a full-sized TSO skb can be sent, do it. */
1237 if (sysctl_tcp_tso_win_divisor) {
1238 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1240 /* If at least some fraction of a window is available,
1243 chunk /= sysctl_tcp_tso_win_divisor;
1247 /* Different approach, try not to defer past a single
1248 * ACK. Receiver should ACK every other full sized
1249 * frame, so if we have space for more than 3 frames
1252 if (limit > tcp_max_burst(tp) * tp->mss_cache)
1256 /* Ok, it looks like it is advisable to defer. */
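	/* Bit 0 marks "deferred"; the upper bits hold jiffies << 1 so the
	 * two-clock-tick check above can compare timestamps. */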
1257 tp->tso_deferred = 1 | (jiffies<<1);
1262 tp->tso_deferred = 0;
1266 /* Create a new MTU probe if we are ready.
1267 * Returns 0 if we should wait to probe (no cwnd available),
1268 * 1 if a probe was sent,
1270 static int tcp_mtu_probe(struct sock *sk)
1272 struct tcp_sock *tp = tcp_sk(sk);
1273 struct inet_connection_sock *icsk = inet_csk(sk);
1274 struct sk_buff *skb, *nskb, *next;
1281 /* Not currently probing/verifying,
1283 * have enough cwnd, and
1284 * not SACKing (the variable headers throw things off) */
1285 if (!icsk->icsk_mtup.enabled ||
1286 icsk->icsk_mtup.probe_size ||
1287 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1288 tp->snd_cwnd < 11 ||
1289 tp->rx_opt.eff_sacks)
1292 /* Very simple search strategy: just double the MSS. */
1293 mss_now = tcp_current_mss(sk, 0);
1294 probe_size = 2*tp->mss_cache;
1295 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1296 /* TODO: set timer for probe_converge_event */
1300 /* Have enough data in the send queue to probe? */
1302 if ((skb = tcp_send_head(sk)) == NULL)
1304 while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
1305 skb = tcp_write_queue_next(sk, skb);
1306 if (len < probe_size)
1309 /* Receive window check. */
1310 if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
1311 if (tp->snd_wnd < probe_size)
1317 /* Do we need to wait to drain cwnd? */
1318 pif = tcp_packets_in_flight(tp);
1319 if (pif + 2 > tp->snd_cwnd) {
1320 /* With no packets in flight, don't stall. */
1327 /* We're allowed to probe. Build it now. */
1328 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1330 sk_charge_skb(sk, nskb);
1332 skb = tcp_send_head(sk);
1333 tcp_insert_write_queue_before(nskb, skb, sk);
1334 tcp_advance_send_head(sk, skb);
1336 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1337 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1338 TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
1339 TCP_SKB_CB(nskb)->sacked = 0;
1341 nskb->ip_summed = skb->ip_summed;
1344 while (len < probe_size) {
1345 next = tcp_write_queue_next(sk, skb);
1347 copy = min_t(int, skb->len, probe_size - len);
1348 if (nskb->ip_summed)
1349 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1351 nskb->csum = skb_copy_and_csum_bits(skb, 0,
1352 skb_put(nskb, copy), copy, nskb->csum);
1354 if (skb->len <= copy) {
1355 /* We've eaten all the data from this skb.
1357 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1358 tcp_unlink_write_queue(skb, sk);
1359 sk_stream_free_skb(sk, skb);
1361 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1362 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1363 if (!skb_shinfo(skb)->nr_frags) {
1364 skb_pull(skb, copy);
1365 if (skb->ip_summed != CHECKSUM_PARTIAL)
1366 skb->csum = csum_partial(skb->data, skb->len, 0);
1368 __pskb_trim_head(skb, copy);
1369 tcp_set_skb_tso_segs(sk, skb, mss_now);
1371 TCP_SKB_CB(skb)->seq += copy;
1377 tcp_init_tso_segs(sk, nskb, nskb->len);
1379 /* We're ready to send. If this fails, the probe will
1380 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1381 TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1382 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1383 /* Decrement cwnd here because we are sending
1384 * effectively two packets. */
1386 update_send_head(sk, nskb);
1388 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1389 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1390 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1399 /* This routine writes packets to the network. It advances the
1400 * send_head. This happens as incoming acks open up the remote
1403 * Returns 1 if no segments are in flight and we have queued segments, but
1404 * cannot send anything now because of SWS or another problem.
1406 static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
1408 struct tcp_sock *tp = tcp_sk(sk);
1409 struct sk_buff *skb;
1410 unsigned int tso_segs, sent_pkts;
1414 /* If we are closed, the bytes will have to remain here.
1415 * In time closedown will finish, we empty the write queue and all
1418 if (unlikely(sk->sk_state == TCP_CLOSE))
1423 /* Do MTU probing. */
1424 if ((result = tcp_mtu_probe(sk)) == 0) {
1426 } else if (result > 0) {
1430 while ((skb = tcp_send_head(sk))) {
1433 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1436 cwnd_quota = tcp_cwnd_test(tp, skb);
1440 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1443 if (tso_segs == 1) {
1444 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1445 (tcp_skb_is_last(sk, skb) ?
1446 nonagle : TCP_NAGLE_PUSH))))
1449 if (tcp_tso_should_defer(sk, skb))
1455 limit = tcp_window_allows(tp, skb,
1456 mss_now, cwnd_quota);
1458 if (skb->len < limit) {
1459 unsigned int trim = skb->len % mss_now;
1462 limit = skb->len - trim;
1466 if (skb->len > limit &&
1467 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1470 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1472 if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
1475 /* Advance the send_head. This one is sent out.
1476 * This call will increment packets_out.
1478 update_send_head(sk, skb);
1480 tcp_minshall_update(tp, mss_now, skb);
1484 if (likely(sent_pkts)) {
1485 tcp_cwnd_validate(sk);
1488 return !tp->packets_out && tcp_send_head(sk);
1491 /* Push out any pending frames which were held back due to
1492 * TCP_CORK or attempt at coalescing tiny packets.
1493 * The socket must be locked by the caller.
1495 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1498 struct sk_buff *skb = tcp_send_head(sk);
1501 if (tcp_write_xmit(sk, cur_mss, nonagle))
1502 tcp_check_probe_timer(sk);
1506 /* Send _single_ skb sitting at the send head. This function requires
1507 * true push pending frames to setup probe timer etc.
1509 void tcp_push_one(struct sock *sk, unsigned int mss_now)
1511 struct tcp_sock *tp = tcp_sk(sk);
1512 struct sk_buff *skb = tcp_send_head(sk);
1513 unsigned int tso_segs, cwnd_quota;
1515 BUG_ON(!skb || skb->len < mss_now);
1517 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1518 cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1520 if (likely(cwnd_quota)) {
1527 limit = tcp_window_allows(tp, skb,
1528 mss_now, cwnd_quota);
1530 if (skb->len < limit) {
1531 unsigned int trim = skb->len % mss_now;
1534 limit = skb->len - trim;
1538 if (skb->len > limit &&
1539 unlikely(tso_fragment(sk, skb, limit, mss_now)))
1542 /* Send it out now. */
1543 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1545 if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1546 update_send_head(sk, skb);
1547 tcp_cwnd_validate(sk);
1553 /* This function returns the amount that we can raise the
1554 * usable window based on the following constraints
1556 * 1. The window can never be shrunk once it is offered (RFC 793)
1557 * 2. We limit memory per socket
1560 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1561 * RECV.NEXT + RCV.WIN fixed until:
1562 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
1564 * i.e. don't raise the right edge of the window until you can raise
1565 * it at least MSS bytes.
1567 * Unfortunately, the recommended algorithm breaks header prediction,
1568 * since header prediction assumes th->window stays fixed.
1570 * Strictly speaking, keeping th->window fixed violates the receiver
1571 * side SWS prevention criteria. The problem is that under this rule
1572 * a stream of single byte packets will cause the right side of the
1573 * window to always advance by a single byte.
1575 * Of course, if the sender implements sender side SWS prevention
1576 * then this will not be a problem.
1578 * BSD seems to make the following compromise:
1580 * If the free space is less than the 1/4 of the maximum
1581 * space available and the free space is less than 1/2 mss,
1582 * then set the window to 0.
1583 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1584 * Otherwise, just prevent the window from shrinking
1585 * and from being larger than the largest representable value.
1587 * This prevents incremental opening of the window in the regime
1588 * where TCP is limited by the speed of the reader side taking
1589 * data out of the TCP receive queue. It does nothing about
1590 * those cases where the window is constrained on the sender side
1591 * because the pipeline is full.
1593 * BSD also seems to "accidentally" limit itself to windows that are a
1594 * multiple of MSS, at least until the free space gets quite small.
1595 * This would appear to be a side effect of the mbuf implementation.
1596 * Combining these two algorithms results in the observed behavior
1597 * of having a fixed window size at almost all times.
1599 * Below we obtain similar behavior by forcing the offered window to
1600 * a multiple of the mss when it is feasible to do so.
1602 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1603 * Regular options like TIMESTAMP are taken into account.
1605 u32 __tcp_select_window(struct sock *sk)
1607 struct inet_connection_sock *icsk = inet_csk(sk);
1608 struct tcp_sock *tp = tcp_sk(sk);
1609 /* MSS for the peer's data. Previous versions used mss_clamp
1610 * here. I don't know if the value based on our guesses
1611 * of peer's MSS is better for the performance. It's more correct
1612 * but may be worse for the performance because of rcv_mss
1613 * fluctuations. --SAW 1998/11/1
1615 int mss = icsk->icsk_ack.rcv_mss;
1616 int free_space = tcp_space(sk);
1617 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1620 if (mss > full_space)
1623 if (free_space < full_space/2) {
1624 icsk->icsk_ack.quick = 0;
1626 if (tcp_memory_pressure)
1627 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
1629 if (free_space < mss)
1633 if (free_space > tp->rcv_ssthresh)
1634 free_space = tp->rcv_ssthresh;
1636 /* Don't do rounding if we are using window scaling, since the
1637 * scaled window will not line up with the MSS boundary anyway.
1639 window = tp->rcv_wnd;
1640 if (tp->rx_opt.rcv_wscale) {
1641 window = free_space;
1643 /* Advertise enough space so that it won't get scaled away.
1644 * Important case: prevent zero window announcement if
1645 * 1<<rcv_wscale > mss.
1647 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1648 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1649 << tp->rx_opt.rcv_wscale);
1651 /* Get the largest window that is a nice multiple of mss.
1652 * Window clamp already applied above.
1653 * If our current window offering is within 1 mss of the
1654 * free space we just keep it. This prevents the divide
1655 * and multiply from happening most of the time.
1656 * We also don't do any window rounding when the free space
1659 if (window <= free_space - mss || window > free_space)
1660 window = (free_space/mss)*mss;
1661 else if (mss == full_space &&
1662 free_space > window + full_space/2)
1663 window = free_space;
1669 /* Attempt to collapse two adjacent SKB's during retransmission. */
1670 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
1672 struct tcp_sock *tp = tcp_sk(sk);
1673 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
1675 /* The first test we must make is that neither of these two
1676 * SKB's are still referenced by someone else.
1678 if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
1679 int skb_size = skb->len, next_skb_size = next_skb->len;
1680 u16 flags = TCP_SKB_CB(skb)->flags;
1682 /* Also punt if next skb has been SACK'd. */
1683 if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
1686 /* Next skb is out of window. */
1687 if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
1690 /* Punt if not enough space exists in the first SKB for
1691 * the data in the second, or the total combined payload
1692 * would exceed the MSS.
1694 if ((next_skb_size > skb_tailroom(skb)) ||
1695 ((skb_size + next_skb_size) > mss_now))
1698 BUG_ON(tcp_skb_pcount(skb) != 1 ||
1699 tcp_skb_pcount(next_skb) != 1);
1701 /* changing transmit queue under us so clear hints */
1702 clear_all_retrans_hints(tp);
1704 /* Ok. We will be able to collapse the packet. */
1705 tcp_unlink_write_queue(next_skb, sk);
1707 skb_copy_from_linear_data(next_skb,
1708 skb_put(skb, next_skb_size),
1711 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
1712 skb->ip_summed = CHECKSUM_PARTIAL;
1714 if (skb->ip_summed != CHECKSUM_PARTIAL)
1715 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1717 /* Update sequence range on original skb. */
1718 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1720 /* Merge over control information. */
1721 flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
1722 TCP_SKB_CB(skb)->flags = flags;
1724 /* All done, get rid of second SKB and account for it so
1725 * packet counting does not break.
1727 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
1728 if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
1729 tp->retrans_out -= tcp_skb_pcount(next_skb);
1730 if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
1731 tp->lost_out -= tcp_skb_pcount(next_skb);
1732 tp->left_out -= tcp_skb_pcount(next_skb);
1734 /* Reno case is special. Sigh... */
1735 if (!tp->rx_opt.sack_ok && tp->sacked_out) {
1736 tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
1737 tp->left_out -= tcp_skb_pcount(next_skb);
1740 /* Not quite right: it can be > snd.fack, but
1741 * it is better to underestimate fackets.
1743 tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
1744 tcp_packets_out_dec(tp, next_skb);
1745 sk_stream_free_skb(sk, next_skb);
1749 /* Do a simple retransmit without using the backoff mechanisms in
1750 * tcp_timer. This is used for path mtu discovery.
1751 * The socket is already locked here.
1753 void tcp_simple_retransmit(struct sock *sk)
1755 const struct inet_connection_sock *icsk = inet_csk(sk);
1756 struct tcp_sock *tp = tcp_sk(sk);
1757 struct sk_buff *skb;
1758 unsigned int mss = tcp_current_mss(sk, 0);
1761 tcp_for_write_queue(skb, sk) {
1762 if (skb == tcp_send_head(sk))
1764 if (skb->len > mss &&
1765 !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1766 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1767 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1768 tp->retrans_out -= tcp_skb_pcount(skb);
1770 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
1771 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1772 tp->lost_out += tcp_skb_pcount(skb);
1778 clear_all_retrans_hints(tp);
1783 tcp_sync_left_out(tp);
1785 /* Don't muck with the congestion window here.
1786 * The reason is that we do not increase the amount of _data_
1787 * in the network, but the units changed and the effective
1788 * cwnd/ssthresh are really reduced now.
1790 if (icsk->icsk_ca_state != TCP_CA_Loss) {
1791 tp->high_seq = tp->snd_nxt;
1792 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1793 tp->prior_ssthresh = 0;
1794 tp->undo_marker = 0;
1795 tcp_set_ca_state(sk, TCP_CA_Loss);
1797 tcp_xmit_retransmit_queue(sk);
1800 /* This retransmits one SKB. Policy decisions and retransmit queue
1801 * state updates are done by the caller. Returns non-zero if an
1802 * error occurred which prevented the send.
1804 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1806 struct tcp_sock *tp = tcp_sk(sk);
1807 struct inet_connection_sock *icsk = inet_csk(sk);
1808 unsigned int cur_mss = tcp_current_mss(sk, 0);
1811 /* Inconclusive MTU probe */
1812 if (icsk->icsk_mtup.probe_size) {
1813 icsk->icsk_mtup.probe_size = 0;
1816 /* Do not send more than we queued. 1/4 is reserved for possible
1817 * copying overhead: fragmentation, tunneling, mangling etc.
1819 if (atomic_read(&sk->sk_wmem_alloc) >
1820 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
1823 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
1824 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1826 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
1830 /* If the receiver has shrunk its window, and skb is out of
1831 * the new window, do not retransmit it. The exception is the
1832 * case when the window is shrunk to zero. In this case
1833 * our retransmit serves as a zero window probe.
1835 if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
1836 && TCP_SKB_CB(skb)->seq != tp->snd_una)
1839 if (skb->len > cur_mss) {
1840 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
1841 return -ENOMEM; /* We'll try again later. */
1844 /* Collapse two adjacent packets if worthwhile and we can. */
1845 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1846 (skb->len < (cur_mss >> 1)) &&
1847 (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1848 (!tcp_skb_is_last(sk, skb)) &&
1849 (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
1850 (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
1851 (sysctl_tcp_retrans_collapse != 0))
1852 tcp_retrans_try_collapse(sk, skb, cur_mss);
1854 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1855 return -EHOSTUNREACH; /* Routing failure or similar. */
1857 /* Some Solaris stacks overoptimize and ignore the FIN on a
1858 * retransmit when old data is attached. So strip it off
1859 * since it is cheap to do so and saves bytes on the network.
1862 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1863 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
1864 if (!pskb_trim(skb, 0)) {
1865 TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
1866 skb_shinfo(skb)->gso_segs = 1;
1867 skb_shinfo(skb)->gso_size = 0;
1868 skb_shinfo(skb)->gso_type = 0;
1869 skb->ip_summed = CHECKSUM_NONE;
1874 /* Make a copy, if the first transmission SKB clone we made
1875 * is still in somebody's hands, else make a clone.
1877 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1879 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
1882 /* Update global TCP statistics. */
1883 TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
1885 tp->total_retrans++;
1887 #if FASTRETRANS_DEBUG > 0
1888 if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
1889 if (net_ratelimit())
1890 printk(KERN_DEBUG "retrans_out leaked.\n");
1893 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
1894 tp->retrans_out += tcp_skb_pcount(skb);
1896 /* Save stamp of the first retransmit. */
1897 if (!tp->retrans_stamp)
1898 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1902 /* snd_nxt is stored to detect loss of retransmitted segment,
1903 * see tcp_input.c tcp_sacktag_write_queue().
1905 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1910 /* This gets called after a retransmit timeout, and the initially
1911 * retransmitted data is acknowledged. It tries to continue
1912 * resending the rest of the retransmit queue, until either
1913 * we've sent it all or the congestion window limit is reached.
1914 * If doing SACK, the first ACK which comes back for a timeout
1915 * based retransmit packet might feed us FACK information again.
1916 * If so, we use it to avoid unnecessary retransmissions.
1918 void tcp_xmit_retransmit_queue(struct sock *sk)
1920 const struct inet_connection_sock *icsk = inet_csk(sk);
1921 struct tcp_sock *tp = tcp_sk(sk);
1922 struct sk_buff *skb;
1925 if (tp->retransmit_skb_hint) {
1926 skb = tp->retransmit_skb_hint;
1927 packet_cnt = tp->retransmit_cnt_hint;
1929 skb = tcp_write_queue_head(sk);
1933 /* First pass: retransmit lost packets. */
1935 tcp_for_write_queue_from(skb, sk) {
1936 __u8 sacked = TCP_SKB_CB(skb)->sacked;
1938 if (skb == tcp_send_head(sk))
1940 /* we could do better than to assign each time */
1941 tp->retransmit_skb_hint = skb;
1942 tp->retransmit_cnt_hint = packet_cnt;
1944 /* Assume this retransmit will generate
1945 * only one packet for congestion window
1946 * calculation purposes. This works because
1947 * tcp_retransmit_skb() will chop up the
1948 * packet to be MSS sized and all the
1949 * packet counting works out.
1951 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
1954 if (sacked & TCPCB_LOST) {
1955 if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
1956 if (tcp_retransmit_skb(sk, skb)) {
1957 tp->retransmit_skb_hint = NULL;
1960 if (icsk->icsk_ca_state != TCP_CA_Loss)
1961 NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
1963 NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
1965 if (skb == tcp_write_queue_head(sk))
1966 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
1967 inet_csk(sk)->icsk_rto,
1971 packet_cnt += tcp_skb_pcount(skb);
1972 if (packet_cnt >= tp->lost_out)
1978 /* OK, demanded retransmission is finished. */
1980 /* Forward retransmissions are possible only during Recovery. */
1981 if (icsk->icsk_ca_state != TCP_CA_Recovery)
1984 /* No forward retransmissions in Reno are possible. */
1985 if (!tp->rx_opt.sack_ok)
1988 /* Yeah, we have to make a difficult choice between forward transmission
1989 * and retransmission... Both ways have their merits...
1991 * For now we do not retransmit anything, while we have some new
1992 * segments to send. In the other cases, follow rule 3 for
1993 * NextSeg() specified in RFC3517.
1996 if (tcp_may_send_now(sk))
1999 /* If nothing is SACKed, highest_sack in the loop won't be valid */
2000 if (!tp->sacked_out)
2003 if (tp->forward_skb_hint)
2004 skb = tp->forward_skb_hint;
2006 skb = tcp_write_queue_head(sk);
2008 tcp_for_write_queue_from(skb, sk) {
2009 if (skb == tcp_send_head(sk))
2011 tp->forward_skb_hint = skb;
2013 if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack))
2016 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2019 if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
2022 /* Ok, retransmit it. */
2023 if (tcp_retransmit_skb(sk, skb)) {
2024 tp->forward_skb_hint = NULL;
2028 if (skb == tcp_write_queue_head(sk))
2029 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2030 inet_csk(sk)->icsk_rto,
2033 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
2038 /* Send a fin. The caller locks the socket for us. This cannot be
2039 * allowed to fail queueing a FIN frame under any circumstances.
2041 void tcp_send_fin(struct sock *sk)
2043 struct tcp_sock *tp = tcp_sk(sk);
2044 struct sk_buff *skb = tcp_write_queue_tail(sk);
2047 /* Optimization, tack on the FIN if we have a queue of
2048 * unsent frames. But be careful about outgoing SACKS
2051 mss_now = tcp_current_mss(sk, 1);
2053 if (tcp_send_head(sk) != NULL) {
2054 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
2055 TCP_SKB_CB(skb)->end_seq++;
2058 /* Socket is locked, keep trying until memory is available. */
2060 skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
2066 /* Reserve space for headers and prepare control bits. */
2067 skb_reserve(skb, MAX_TCP_HEADER);
2069 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
2070 TCP_SKB_CB(skb)->sacked = 0;
2071 skb_shinfo(skb)->gso_segs = 1;
2072 skb_shinfo(skb)->gso_size = 0;
2073 skb_shinfo(skb)->gso_type = 0;
2075 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2076 TCP_SKB_CB(skb)->seq = tp->write_seq;
2077 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2078 tcp_queue_skb(sk, skb);
2080 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
2083 /* We get here when a process closes a file descriptor (either due to
2084 * an explicit close() or as a byproduct of exit()'ing) and there
2085 * was unread data in the receive queue. This behavior is recommended
2086 * by RFC 2525, section 2.17. -DaveM
2087 */
2088 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2090 struct sk_buff *skb;
2092 /* NOTE: No TCP options attached and we never retransmit this. */
2093 skb = alloc_skb(MAX_TCP_HEADER, priority);
2094 if (!skb) {
2095 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2096 return;
2097 }
2099 /* Reserve space for headers and prepare control bits. */
2100 skb_reserve(skb, MAX_TCP_HEADER);
2102 TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
2103 TCP_SKB_CB(skb)->sacked = 0;
2104 skb_shinfo(skb)->gso_segs = 1;
2105 skb_shinfo(skb)->gso_size = 0;
2106 skb_shinfo(skb)->gso_type = 0;
2109 TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
2110 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2111 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2112 if (tcp_transmit_skb(sk, skb, 0, priority))
2113 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
2114 }
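/* Illustrative aside (not in the original source): one caller of this path is
 * tcp_close() in net/ipv4/tcp.c, which resets the connection when unread data
 * is discarded (the RFC 2525, 2.17 case cited above). Roughly, and only as an
 * approximation of that caller:
 *
 *	if (data_was_unread) {
 *		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
 *		tcp_set_state(sk, TCP_CLOSE);
 *		tcp_send_active_reset(sk, GFP_KERNEL);
 *	}
 */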
2116 /* WARNING: This routine must only be called when we have already sent
2117 * a SYN packet that crossed the incoming SYN that caused this routine
2118 * to get called. If this assumption fails then the initial rcv_wnd
2119 * and rcv_wscale values will not be correct.
2120 */
2121 int tcp_send_synack(struct sock *sk)
2123 struct sk_buff* skb;
2125 skb = tcp_write_queue_head(sk);
2126 if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
2127 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2128 return -EFAULT;
2129 }
2130 if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
2131 if (skb_cloned(skb)) {
2132 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2133 if (nskb == NULL)
2134 return -ENOMEM;
2135 tcp_unlink_write_queue(skb, sk);
2136 skb_header_release(nskb);
2137 __tcp_add_write_queue_head(sk, nskb);
2138 sk_stream_free_skb(sk, skb);
2139 sk_charge_skb(sk, nskb);
2140 skb = nskb;
2141 }
2143 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
2144 TCP_ECN_send_synack(tcp_sk(sk), skb);
2145 }
2146 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2147 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2148 }
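/* Illustrative aside (not in the original source): a worked example for the
 * tcp_header_size computation in tcp_make_synack() below, using the aligned
 * option lengths from include/net/tcp.h (tcphdr = 20, TCPOLEN_MSS = 4,
 * TCPOLEN_TSTAMP_ALIGNED = 12, TCPOLEN_WSCALE_ALIGNED = 4,
 * TCPOLEN_SACKPERM_ALIGNED = 4, TCPOLEN_MD5SIG_ALIGNED = 20):
 *
 *	timestamps + wscale + SACK:	20 + 4 + 12 + 4 = 40 bytes
 *		(SACK-permitted rides in the timestamp option's NOP bytes,
 *		 so it adds nothing extra)
 *	SACK + wscale, no timestamps:	20 + 4 + 4 + 4  = 32 bytes
 *	add MD5 to either:		+20 bytes
 *
 * th->doff carries this size in 32-bit words, e.g. 40 >> 2 == 10.
 */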
2151 /* Prepare a SYN-ACK. */
2153 struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2154 struct request_sock *req)
2156 struct inet_request_sock *ireq = inet_rsk(req);
2157 struct tcp_sock *tp = tcp_sk(sk);
2158 struct tcphdr *th;
2159 int tcp_header_size;
2160 struct sk_buff *skb;
2161 #ifdef CONFIG_TCP_MD5SIG
2162 struct tcp_md5sig_key *md5;
2163 __u8 *md5_hash_location;
2164 #endif
2166 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2167 if (skb == NULL)
2168 return NULL;
2170 /* Reserve space for headers. */
2171 skb_reserve(skb, MAX_TCP_HEADER);
2173 skb->dst = dst_clone(dst);
2175 tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
2176 (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
2177 (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
2178 /* SACK_PERM is in the place of NOP NOP of TS */
2179 ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
2181 #ifdef CONFIG_TCP_MD5SIG
2182 /* Are we doing MD5 on this segment? If so - make room for it */
2183 md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
2184 if (md5)
2185 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
2186 #endif
2187 skb_push(skb, tcp_header_size);
2188 skb_reset_transport_header(skb);
2190 th = tcp_hdr(skb);
2191 memset(th, 0, sizeof(struct tcphdr));
2192 th->syn = 1;
2193 th->ack = 1;
2194 TCP_ECN_make_synack(req, th);
2195 th->source = inet_sk(sk)->sport;
2196 th->dest = ireq->rmt_port;
2197 TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
2198 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
2199 TCP_SKB_CB(skb)->sacked = 0;
2200 skb_shinfo(skb)->gso_segs = 1;
2201 skb_shinfo(skb)->gso_size = 0;
2202 skb_shinfo(skb)->gso_type = 0;
2203 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2204 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2205 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2206 __u8 rcv_wscale;
2207 /* Set this up on the first call only */
2208 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2209 /* tcp_full_space because it is guaranteed to be the first packet */
2210 tcp_select_initial_window(tcp_full_space(sk),
2211 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2212 &req->rcv_wnd,
2213 &req->window_clamp,
2214 ireq->wscale_ok,
2215 &rcv_wscale);
2216 ireq->rcv_wscale = rcv_wscale;
2217 }
2219 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2220 th->window = htons(min(req->rcv_wnd, 65535U));
2222 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2223 tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
2224 ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
2225 TCP_SKB_CB(skb)->when,
2226 req->ts_recent,
2227 (
2228 #ifdef CONFIG_TCP_MD5SIG
2229 md5 ? &md5_hash_location :
2230 #endif
2231 NULL)
2232 );
2234 skb->csum = 0;
2235 th->doff = (tcp_header_size >> 2);
2236 TCP_INC_STATS(TCP_MIB_OUTSEGS);
2238 #ifdef CONFIG_TCP_MD5SIG
2239 /* Okay, we have all we need - do the md5 hash if needed */
2240 if (md5) {
2241 tp->af_specific->calc_md5_hash(md5_hash_location,
2242 md5,
2243 NULL, dst, req,
2244 tcp_hdr(skb), sk->sk_protocol,
2245 skb->len);
2246 }
2247 #endif
2249 return skb;
2250 }
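/* Illustrative aside (not in the original source): the RFC 1323 rule applied
 * above (the window field of a SYN or SYN-ACK is never scaled) clamps the
 * advertised value to 16 bits even when a larger rcv_wnd was chosen. With
 * made-up numbers, req->rcv_wnd = 262144 and rcv_wscale = 3 still yield
 *
 *	th->window = htons(min(262144U, 65535U));	// 65535 on the wire
 *
 * and only post-handshake segments are interpreted through the scale factor
 * (advertised value << rcv_wscale).
 */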
2253 /* Do all connect socket setups that can be done AF independent. */
2255 static void tcp_connect_init(struct sock *sk)
2257 struct dst_entry *dst = __sk_dst_get(sk);
2258 struct tcp_sock *tp = tcp_sk(sk);
2259 __u8 rcv_wscale;
2261 /* We'll fix this up when we get a response from the other end.
2262 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2264 tp->tcp_header_len = sizeof(struct tcphdr) +
2265 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2267 #ifdef CONFIG_TCP_MD5SIG
2268 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2269 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2270 #endif
2272 /* If user gave his TCP_MAXSEG, record it to clamp */
2273 if (tp->rx_opt.user_mss)
2274 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2277 tcp_sync_mss(sk, dst_mtu(dst));
2279 if (!tp->window_clamp)
2280 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2281 tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2282 tcp_initialize_rcv_mss(sk);
2284 tcp_select_initial_window(tcp_full_space(sk),
2285 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2286 &tp->rcv_wnd,
2287 &tp->window_clamp,
2288 sysctl_tcp_window_scaling,
2289 &rcv_wscale);
2291 tp->rx_opt.rcv_wscale = rcv_wscale;
2292 tp->rcv_ssthresh = tp->rcv_wnd;
2295 sock_reset_flag(sk, SOCK_DONE);
2297 tcp_init_wl(tp, tp->write_seq, 0);
2298 tp->snd_una = tp->write_seq;
2299 tp->snd_sml = tp->write_seq;
2304 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2305 inet_csk(sk)->icsk_retransmits = 0;
2306 tcp_clear_retrans(tp);
2307 }
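/* Illustrative aside (not in the original source): a worked example for the
 * mss handed to tcp_select_initial_window() above. With tp->advmss = 1460 and
 * the timestamp adjustment applying (tp->tcp_header_len = 20 + 12 when
 * timestamps are on), the second argument becomes
 *
 *	1460 - (32 - 20) = 1448 bytes
 *
 * i.e. the per-segment payload once the 12-byte timestamp option is accounted
 * for; tcp_select_initial_window() then derives rcv_wnd, window_clamp and
 * rcv_wscale from tcp_full_space(sk) in units of that mss.
 */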
2310 /* Build a SYN and send it off. */
2312 int tcp_connect(struct sock *sk)
2314 struct tcp_sock *tp = tcp_sk(sk);
2315 struct sk_buff *buff;
2317 tcp_connect_init(sk);
2319 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2320 if (unlikely(buff == NULL))
2321 return -ENOBUFS;
2323 /* Reserve space for headers. */
2324 skb_reserve(buff, MAX_TCP_HEADER);
2326 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
2327 TCP_ECN_send_syn(sk, buff);
2328 TCP_SKB_CB(buff)->sacked = 0;
2329 skb_shinfo(buff)->gso_segs = 1;
2330 skb_shinfo(buff)->gso_size = 0;
2331 skb_shinfo(buff)->gso_type = 0;
2333 tp->snd_nxt = tp->write_seq;
2334 TCP_SKB_CB(buff)->seq = tp->write_seq++;
2335 TCP_SKB_CB(buff)->end_seq = tp->write_seq;
2338 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2339 tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2340 skb_header_release(buff);
2341 __tcp_add_write_queue_tail(sk, buff);
2342 sk_charge_skb(sk, buff);
2343 tp->packets_out += tcp_skb_pcount(buff);
2344 tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2346 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2347 * in order to make this packet get counted in tcpOutSegs.
2349 tp->snd_nxt = tp->write_seq;
2350 tp->pushed_seq = tp->write_seq;
2351 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
2353 /* Timer for repeating the SYN until an answer. */
2354 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2355 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2356 return 0;
2357 }
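/* Illustrative aside (not in the original source): sequence accounting for
 * the SYN built above, with a made-up initial write_seq (ISN) of 4000:
 *
 *	TCP_SKB_CB(buff)->seq     == 4000	// the SYN itself
 *	TCP_SKB_CB(buff)->end_seq == 4001
 *	tp->snd_nxt == tp->write_seq == 4001	// SYN consumes one seq number
 *
 * The retransmit timer is armed with icsk_rto, which starts out at
 * TCP_TIMEOUT_INIT (3*HZ, i.e. 3 seconds) until an RTT sample exists.
 */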
2359 /* Send out a delayed ack, the caller does the policy checking
2360 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
2361 * for details.
2362 */
2363 void tcp_send_delayed_ack(struct sock *sk)
2365 struct inet_connection_sock *icsk = inet_csk(sk);
2366 int ato = icsk->icsk_ack.ato;
2367 unsigned long timeout;
2369 if (ato > TCP_DELACK_MIN) {
2370 const struct tcp_sock *tp = tcp_sk(sk);
2371 int max_ato = HZ / 2;
2373 if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2374 max_ato = TCP_DELACK_MAX;
2376 /* Slow path, intersegment interval is "high". */
2378 /* If some rtt estimate is known, use it to bound delayed ack.
2379 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
2380 * directly.
2381 */
2382 if (tp->srtt) {
2383 int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
2385 if (rtt < max_ato)
2386 max_ato = rtt;
2387 }
2389 ato = min(ato, max_ato);
2390 }
2392 /* Stay within the limit we were given */
2393 timeout = jiffies + ato;
2395 /* Use new timeout only if there wasn't an older one earlier. */
2396 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2397 /* If delack timer was blocked or is about to expire,
2398 * send ACK now.
2399 */
2400 if (icsk->icsk_ack.blocked ||
2401 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2402 tcp_send_ack(sk);
2403 return;
2404 }
2406 if (!time_before(timeout, icsk->icsk_ack.timeout))
2407 timeout = icsk->icsk_ack.timeout;
2408 }
2409 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2410 icsk->icsk_ack.timeout = timeout;
2411 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2412 }
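/* Illustrative aside (not in the original source): a worked example of the
 * ato clamping above, assuming HZ = 1000 so that TCP_DELACK_MIN = HZ/25 =
 * 40 ms and TCP_DELACK_MAX = HZ/5 = 200 ms. With a smoothed RTT of 24 ms
 * (tp->srtt stores 8*RTT, so srtt>>3 == 24):
 *
 *	rtt     = max(24, 40) = 40 ms
 *	max_ato = 40 ms			// rtt < max_ato, so it bounds the delay
 *	ato     = min(ato, 40 ms)
 *
 * so the delayed ACK fires within roughly 40 ms of being scheduled, and never
 * later than 200 ms even in pingpong mode.
 */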
2414 /* This routine sends an ack and also updates the window. */
2415 void tcp_send_ack(struct sock *sk)
2417 /* If we have been reset, we may not send again. */
2418 if (sk->sk_state != TCP_CLOSE) {
2419 struct sk_buff *buff;
2421 /* We are not putting this on the write queue, so
2422 * tcp_transmit_skb() will set the ownership to this
2423 * sock.
2424 */
2425 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2426 if (buff == NULL) {
2427 inet_csk_schedule_ack(sk);
2428 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2429 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2430 TCP_DELACK_MAX, TCP_RTO_MAX);
2431 return;
2432 }
2434 /* Reserve space for headers and prepare control bits. */
2435 skb_reserve(buff, MAX_TCP_HEADER);
2437 TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
2438 TCP_SKB_CB(buff)->sacked = 0;
2439 skb_shinfo(buff)->gso_segs = 1;
2440 skb_shinfo(buff)->gso_size = 0;
2441 skb_shinfo(buff)->gso_type = 0;
2443 /* Send it off, this clears delayed acks for us. */
2444 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
2445 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2446 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2447 }
2448 }
2450 /* This routine sends a packet with an out of date sequence
2451 * number. It assumes the other end will try to ack it.
2453 * Question: what should we do while in urgent mode?
2454 * 4.4BSD forces sending single byte of data. We cannot send
2455 * out of window data, because we have SND.NXT==SND.MAX...
2457 * Current solution: to send TWO zero-length segments in urgent mode:
2458 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
2459 * out-of-date with SND.UNA-1 to probe window.
2461 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2463 struct tcp_sock *tp = tcp_sk(sk);
2464 struct sk_buff *skb;
2466 /* We don't queue it, tcp_transmit_skb() sets ownership. */
2467 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2468 if (skb == NULL)
2469 return -1;
2471 /* Reserve space for headers and set control bits. */
2472 skb_reserve(skb, MAX_TCP_HEADER);
2474 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
2475 TCP_SKB_CB(skb)->sacked = urgent;
2476 skb_shinfo(skb)->gso_segs = 1;
2477 skb_shinfo(skb)->gso_size = 0;
2478 skb_shinfo(skb)->gso_type = 0;
2480 /* Use a previous sequence. This should cause the other
2481 * end to send an ack. Don't queue or clone SKB, just
2482 * send it.
2483 */
2484 TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
2485 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
2486 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2487 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2488 }
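/* Illustrative aside (not in the original source): why SND.UNA-1 works as a
 * probe. With a made-up snd_una of 5000, the non-urgent probe built above
 * carries seq = end_seq = 4999. That sequence number is already acknowledged,
 * so the receiver discards the segment but must still reply with an ACK, and
 * that ACK carries the receiver's current window, which is exactly what the
 * zero-window probe needs to learn. The urgent variant uses seq = snd_una so
 * the urgent pointer can be delivered.
 */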
2490 int tcp_write_wakeup(struct sock *sk)
2492 if (sk->sk_state != TCP_CLOSE) {
2493 struct tcp_sock *tp = tcp_sk(sk);
2494 struct sk_buff *skb;
2496 if ((skb = tcp_send_head(sk)) != NULL &&
2497 before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
2498 int err;
2499 unsigned int mss = tcp_current_mss(sk, 0);
2500 unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
2502 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2503 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2505 /* We are probing the opening of a window
2506 * but the window size is != 0,
2507 * which must have been a result of sender-side SWS avoidance.
2508 */
2509 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2510 skb->len > mss) {
2511 seg_size = min(seg_size, mss);
2512 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2513 if (tcp_fragment(sk, skb, seg_size, mss))
2514 return -1;
2515 } else if (!tcp_skb_pcount(skb))
2516 tcp_set_skb_tso_segs(sk, skb, mss);
2518 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2519 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2520 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2521 if (!err) {
2522 update_send_head(sk, skb);
2523 }
2524 return err;
2525 } else {
2526 if (tp->urg_mode &&
2527 between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
2528 tcp_xmit_probe_skb(sk, TCPCB_URG);
2529 return tcp_xmit_probe_skb(sk, 0);
2535 /* A window probe timeout has occurred. If the window is not closed,
2536 * send a partial packet, otherwise send a zero-window probe.
2537 */
2538 void tcp_send_probe0(struct sock *sk)
2540 struct inet_connection_sock *icsk = inet_csk(sk);
2541 struct tcp_sock *tp = tcp_sk(sk);
2542 int err;
2544 err = tcp_write_wakeup(sk);
2546 if (tp->packets_out || !tcp_send_head(sk)) {
2547 /* Cancel probe timer, if it is not required. */
2548 icsk->icsk_probes_out = 0;
2549 icsk->icsk_backoff = 0;
2550 return;
2551 }
2553 if (err <= 0) {
2554 if (icsk->icsk_backoff < sysctl_tcp_retries2)
2555 icsk->icsk_backoff++;
2556 icsk->icsk_probes_out++;
2557 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2558 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2559 TCP_RTO_MAX);
2560 } else {
2561 /* If packet was not sent due to local congestion,
2562 * do not backoff and do not remember icsk_probes_out.
2563 * Let local senders fight for local resources.
2564 *
2565 * But still use the accumulated backoff.
2566 */
2567 if (!icsk->icsk_probes_out)
2568 icsk->icsk_probes_out = 1;
2569 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2570 min(icsk->icsk_rto << icsk->icsk_backoff,
2571 TCP_RESOURCE_PROBE_INTERVAL),
2572 TCP_RTO_MAX);
2573 }
2574 }
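/* Illustrative aside (not in the original source): a worked example of the
 * probe back-off above, assuming HZ = 1000, icsk_rto = 200 ms and
 * TCP_RTO_MAX = 120 s. Successive window probes are re-armed at
 *
 *	min(200 ms << 1, 120 s) = 400 ms
 *	min(200 ms << 2, 120 s) = 800 ms
 *	...
 *	min(200 ms << 9, 120 s) = 102.4 s
 *	min(200 ms << 10, 120 s) = 120 s	// capped from here on
 *
 * while a probe that failed only because of local congestion (the else branch)
 * keeps the accumulated backoff but retries no later than
 * TCP_RESOURCE_PROBE_INTERVAL (HZ/2, i.e. 500 ms).
 */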
2576 EXPORT_SYMBOL(tcp_connect);
2577 EXPORT_SYMBOL(tcp_make_synack);
2578 EXPORT_SYMBOL(tcp_simple_retransmit);
2579 EXPORT_SYMBOL(tcp_sync_mss);
2580 EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
2581 EXPORT_SYMBOL(tcp_mtup_init);