/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
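
/* Putting the steps above together with concrete numbers: if
 * CHANGEOVER_ACTIVE is set, CYCLING_CHANGEOVER is clear, the SACK yielded
 * count_of_newacks == 2 and TSN t was last sent to a non-primary transport,
 * then step D applies, sctp_cacc_skip() returns 1, and the missing report
 * count for t is not incremented.
 */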

/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->outstanding_bytes = 0;
}

/* Free the outqueue structure and any related pending chunks.
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory.  */
	if (q->malloced)
		kfree(q);
}

/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
			break;
		}
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}
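
/* Usage sketch (illustrative, not a definitive description of every caller):
 * code that queues several chunks back to back typically leaves the queue
 * corked while calling sctp_outq_tail() repeatedly and then calls
 * sctp_outq_uncork() once, so the chunks get bundled in a single flush
 * instead of going out one packet per chunk.
 */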

/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
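
/* For example, inserting TSN 103 into a list already holding 101, 102 and
 * 104 places it immediately before 104, keeping the list in ascending TSN
 * order.  Because the comparison uses TSN_lt(), the ordering remains correct
 * across 32-bit TSN wrap-around.
 */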

/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  sctp_retransmit_reason_t reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX  &&
			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);
}

/* Mark all the eligible packets on a transport for retransmission and force
 * a retransmission.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}

/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	int fast_rtx;
	int error = 0;
	int timer = 0;
	int done = 0;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]). Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore non-fast_retransmit
		 * chunks
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA because of Nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}
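
/* Restating the rule implemented above: when invoked for a T3-rtx expiration
 * or a fast retransmit, at most one packet worth of retransmissions leaves
 * this function; only a plain window-opening flush keeps appending chunks
 * until the packet, the receiver window or the retransmit queue is exhausted.
 */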

/* Cork the outqueue so queued chunks are really queued. */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;

	if (q->cork)
		q->cork = 0;

	error = sctp_outq_flush(q, 0);
	return error;
}

/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * When bundling control chunks with DATA chunks, an
	 * endpoint MUST place control chunks first in the outbound
	 * SCTP packet.  The transmitter MUST transmit DATA chunks
	 * within a SCTP packet in increasing order of TSN.
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/*
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED) ||
			   (new_transport->state == SCTP_PF)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 *
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 * COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else {
				asoc->stats.octrlchunks++;
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
					sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);

		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED) ||
			     (new_transport->state == SCTP_PF)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED)
				continue;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					  "%p skb->users %d.\n",
					  ntohl(chunk->subh.data_hdr->tsn),
					  chunk->skb ? chunk->skb->head : NULL,
					  chunk->skb ?
					  atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
					"not transmit TSN: 0x%x, status: %d\n",
					ntohl(chunk->subh.data_hdr->tsn),
					status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				/* The sender is in the SHUTDOWN-PENDING state,
				 * The sender MAY set the I-bit in the DATA
				 * chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
					asoc->stats.ouodchunks++;
				else
					asoc->stats.oodchunks++;
				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing.  */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}

/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
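
/* Worked example for the accounting above (illustrative numbers only): with
 * next_tsn = 111 and ctsn_ack_point = 100, ten TSNs (101-110) start out
 * unacknowledged.  One gap ack block with start = 3 and end = 5 covers TSNs
 * 103-105, so three more are subtracted and unack_data ends up at 7.
 */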

/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;
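
	/* Plugging sample numbers into the update above: an a_rwnd of 65536
	 * with 1500 bytes still outstanding leaves peer.rwnd = 64036; if more
	 * bytes were outstanding than the peer advertised, rwnd would be
	 * clamped to zero by the branch above.
	 */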

	sctp_generate_fwdtsn(q, sack_ctsn);

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __func__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
			  __func__, asoc, ctsn, asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}

/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	     /* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	     */
	__u32 dbg_kept_tsn = 0;	     /* An un-ACKed range starts here...  */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	     */

	/* 0 : The last TSN was ACKed.
	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1: We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}

#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				}

				/* Start a new range.  */
				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined.  */
				/* Start a new range of ACK-ed TSNs.  */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __func__, tsn);
				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN.  */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
	}
#endif /* SCTP_DEBUG */

	if (transport) {
		struct sctp_association *asoc = transport->asoc;

		/* We may have counted DATA that was migrated
		 * to this transport due to DEL-IP operation.
		 * Subtract those bytes, since they were never
		 * sent on this transport and shouldn't be
		 * credited to this transport.
		 */
		bytes_acked -= migrate_bytes;

		/* 8.2. When an outstanding TSN is acknowledged,
		 * the endpoint shall clear the error counter of
		 * the destination transport address to which the
		 * DATA chunk was last sent.
		 * The association's overall error counter is
		 * also cleared.
		 */
		if (bytes_acked) {
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE ||
			     transport->state == SCTP_UNCONFIRMED) &&
			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn + 2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __func__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}
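
		/* On the arithmetic in the zero window probe check above:
		 * next_tsn is the next TSN to be assigned, so
		 * sack_ctsn + 2 == next_tsn means exactly one TSN
		 * (sack_ctsn + 1) remains unacknowledged, i.e. a single
		 * chunk is in flight, which is the probing situation the
		 * comment describes.
		 */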

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (timer_pending(&transport->T3_rtx_timer) &&
			    del_timer(&transport->T3_rtx_timer)) {
				sctp_transport_put(transport);
			}
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}

/* Mark chunks as missing and consequently may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__func__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}

/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
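
/* Example of the offset check above: with cum_tsn_ack = 100 and a single gap
 * ack block {start = 2, end = 4}, TSNs 102-104 are reported as received.  For
 * tsn = 103 the computed gap is 3, which falls inside the block, so the TSN
 * is treated as acked.
 */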

static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}

/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing         local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
					transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}