/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}

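/* Illustrative sketch (standalone, not compiled into the kernel): the skip
 * rules above reduce to two predicates over simple flags. The helper names
 * and the reduction of transport state to ints are hypothetical, for
 * illustration only; the real checks operate on struct sctp_transport as
 * shown above.
 */
#if 0
#include <stdio.h>

/* Rule D: with >= 2 new acks, never count a TSN sent off-primary. */
static int skip_3_1_d(int sent_to_primary, int count_of_newacks)
{
	return count_of_newacks >= 2 && !sent_to_primary;
}

/* Rule F: with < 2 new acks, skip if the TSN's destination saw no new ack. */
static int skip_3_1_f(int dest_saw_newack, int count_of_newacks)
{
	return count_of_newacks < 2 && !dest_saw_newack;
}

int main(void)
{
	printf("%d\n", skip_3_1_d(0, 2));	/* 1: skip the missing report */
	printf("%d\n", skip_3_1_d(1, 2));	/* 0: sent to primary, count it */
	printf("%d\n", skip_3_1_f(0, 1));	/* 1: destination saw no new ack */
	return 0;
}
#endif
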
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->fast_rtx = 0;
	q->outstanding_bytes = 0;
	q->empty = 1;
	q->cork  = 0;

	q->malloced = 0;
	q->out_qlen = 0;
}

/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}

/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory.  */
	if (q->malloced)
		kfree(q);
}

/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
			q->empty = 0;
			break;
		}
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}

/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}

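/* Illustrative sketch (standalone, not compiled into the kernel): TSN_lt()
 * is serial number arithmetic (RFC 1982), i.e. a signed comparison of the
 * 32-bit difference, so the ascending order maintained here survives TSN
 * wraparound. Below, tsn_lt() is re-derived from the kernel's TSN_lt()
 * definition:
 */
#if 0
#include <stdio.h>

static int tsn_lt(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

int main(void)
{
	/* 0xfffffffe sorts before 0x00000001 across the 2^32 wrap. */
	printf("%d\n", tsn_lt(0xfffffffeu, 0x00000001u));	/* 1 */
	printf("%d\n", tsn_lt(0x00000001u, 0xfffffffeu));	/* 0 */
	return 0;
}
#endif
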
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it as 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted.
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX &&
		     (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);
}

/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	int error = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}

/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	int fast_rtx;
	int error = 0;
	int timer = 0;
	int done = 0;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]). Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_del(&chunk->transmitted_list);
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore chunks that are
		 * not marked for fast retransmit.
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_del(&chunk->transmitted_list);
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->empty = 0;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}

/* Uncork the outqueue: transmit any chunks that were queued up while the
 * outqueue was corked.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;

	if (q->cork)
		q->cork = 0;
	error = sctp_outq_flush(q, 0);
	return error;
}

/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */

			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);

		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED)
				continue;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					  "%p skb->users %d.\n",
					  ntohl(chunk->subh.data_hdr->tsn),
					  chunk->skb ? chunk->skb->head : NULL,
					  chunk->skb ?
					  atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
					"not transmit TSN: 0x%x, status: %d\n",
					ntohl(chunk->subh.data_hdr->tsn),
					status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				/* The sender is in the SHUTDOWN-PENDING
				 * state, so it MAY set the I-bit in the
				 * DATA chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing.  */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}

/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}

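/* Worked example (standalone sketch, not compiled into the kernel): with
 * next_tsn = 110 and ctsn_ack_point = 101, TSNs 102..109 are unacked, so
 * unack_data starts at 110 - 101 - 1 = 8. One Gap Ack Block [2, 4]
 * (offsets from the cumulative ack, i.e. TSNs 103..105) removes three:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int next_tsn = 110, ctsn_ack_point = 101;
	unsigned short gab_start = 2, gab_end = 4;
	unsigned short unack_data;

	unack_data = next_tsn - ctsn_ack_point - 1;	/* 8 */
	unack_data -= gab_end - gab_start + 1;		/* 3 gap-acked TSNs */
	printf("unack_data = %u\n", unack_data);	/* 5: 102, 106..109 */
	return 0;
}
#endif
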
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, sack, &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __func__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
			  __func__, asoc, ctsn, asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}

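/* Worked example (standalone sketch, not compiled into the kernel): the
 * rwnd computation above. If the peer advertised a_rwnd = 4000 and 1500
 * bytes remain outstanding after this SACK, the usable window is 2500;
 * if outstanding ever exceeds a_rwnd, the window clamps to zero instead
 * of wrapping:
 */
#if 0
#include <stdio.h>

static unsigned int sack_rwnd(unsigned int a_rwnd, unsigned int outstanding)
{
	/* Mirrors the clamp in sctp_outq_sack(). */
	return (outstanding < a_rwnd) ? a_rwnd - outstanding : 0;
}

int main(void)
{
	printf("%u\n", sack_rwnd(4000, 1500));	/* 2500 */
	printf("%u\n", sack_rwnd(1000, 1500));	/* 0 */
	return 0;
}
#endif
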
/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	     /* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	     */
	__u32 dbg_kept_tsn = 0;	     /* An un-ACKed range starts here...  */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	     */

	/* 0 : The last TSN was ACKed.
	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1: We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}

#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				}

				/* Start a new range.  */
				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined.  */
				/* Start a new range of ACK-ed TSNs.  */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __func__, tsn);
				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN.  */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn)
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
		else
			SCTP_DEBUG_PRINTK_CONT("\n");
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn)
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
		else
			SCTP_DEBUG_PRINTK_CONT("\n");
	}
#endif /* SCTP_DEBUG */

	if (transport) {
		if (bytes_acked) {
			struct sctp_association *asoc = transport->asoc;

			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE) ||
			    (transport->state == SCTP_UNCONFIRMED)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn + 2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __func__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (timer_pending(&transport->T3_rtx_timer) &&
			    del_timer(&transport->T3_rtx_timer)) {
				sctp_transport_put(transport);
			}
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}

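/* Sketch of the RTO update this code drives through
 * sctp_transport_update_rto() (defined elsewhere, in transport.c). This
 * is the standard RFC 2960 6.3.1 computation in time-unit-agnostic form;
 * the struct and helper below are hypothetical, standalone, and not
 * compiled into the kernel:
 */
#if 0
#include <stdio.h>

struct rto_state {
	double srtt, rttvar, rto;
	int seeded;
};

static void update_rto(struct rto_state *s, double rtt)
{
	if (!s->seeded) {
		/* C2: the first measurement seeds SRTT and RTTVAR. */
		s->srtt = rtt;
		s->rttvar = rtt / 2;
		s->seeded = 1;
	} else {
		/* C3: beta = 1/4, alpha = 1/8. */
		double err = s->srtt - rtt;

		s->rttvar += 0.25 * ((err < 0 ? -err : err) - s->rttvar);
		s->srtt += 0.125 * (rtt - s->srtt);
	}
	s->rto = s->srtt + 4 * s->rttvar;
}

int main(void)
{
	struct rto_state s = { 0 };

	update_rto(&s, 100.0);		/* srtt = 100, rttvar = 50, rto = 300 */
	update_rto(&s, 120.0);		/* srtt = 102.5, rttvar = 42.5 */
	printf("rto = %.1f\n", s.rto);	/* 272.5 */
	return 0;
}
#endif
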
/* Mark chunks as missing; as a consequence they may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__func__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}

/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}

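/* Worked example (standalone sketch, not compiled into the kernel): the
 * gap offsets are relative to the cumulative ack. With cum_tsn_ack = 100
 * and one Gap Ack Block [2, 5], TSNs 102..105 count as acked. The
 * host-order miniature below mirrors the logic of sctp_acked():
 */
#if 0
#include <stdio.h>

static int acked(unsigned int ctsn, unsigned short gab_start,
		 unsigned short gab_end, unsigned int tsn)
{
	unsigned short gap;

	if ((int)(tsn - ctsn) <= 0)	/* TSN_lte(tsn, ctsn) */
		return 1;
	gap = tsn - ctsn;
	return gab_start <= gap && gap <= gab_end;
}

int main(void)
{
	printf("%d\n", acked(100, 2, 5, 103));	/* 1: inside the block */
	printf("%d\n", acked(100, 2, 5, 101));	/* 0: still missing */
	printf("%d\n", acked(100, 2, 5, 99));	/* 1: below the cum ack */
	return 0;
}
#endif
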
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}

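/* Illustrative sketch (standalone, not compiled into the kernel): how the
 * FORWARD TSN skip list dedups streams. A later abandoned chunk overwrites
 * its stream's entry, so only the highest SSN per stream survives, as
 * PR-SCTP C4 requires:
 */
#if 0
#include <stdio.h>

struct skip { unsigned short stream, ssn; };

static int get_skip_pos(const struct skip *list, int nskips,
			unsigned short stream)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (list[i].stream == stream)
			return i;
	return i;	/* == nskips: append as a new entry */
}

int main(void)
{
	struct skip arr[10];
	/* Abandoned chunks in TSN order: (stream, ssn) pairs. */
	const unsigned short in[][2] = { {1, 5}, {2, 9}, {1, 7} };
	int nskips = 0, i, pos;

	for (i = 0; i < 3; i++) {
		pos = get_skip_pos(arr, nskips, in[i][0]);
		arr[pos].stream = in[i][0];
		arr[pos].ssn = in[i][1];
		if (pos == nskips)
			nskips++;
	}
	for (i = 0; i < nskips; i++)
		printf("stream %u ssn %u\n", arr[i].stream, arr[i].ssn);
	/* prints: stream 1 ssn 7, then stream 2 ssn 9 */
	return 0;
}
#endif
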
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing           local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}
}