/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = ktime_set(0, 0);
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt = net->sctp.max_retrans_path;
	peer->pf_retrans = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
		    (unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
		    (unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	atomic_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}

/* This transport is no longer needed.  Free up if possible, or
 * delay until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting
	 * the structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(atomic_read(&transport->refcnt))) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);

	/* When a data chunk is sent, reset the heartbeat interval.  */
	if (!mod_timer(&transport->hb_timer,
		       sctp_transport_timeout(transport)))
		sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		dst_release(transport->dst);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->dst) {
		transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
			__func__, pmtu,
			SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

	dst = sctp_transport_dst_check(t);
	if (!dst)
		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);

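	/* The PMTU update below may invalidate the cached route; presumably
	 * that is why the dst is re-checked and, if needed, looked up again
	 * afterwards.
	 */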
	if (dst) {
		dst->ops->update_pmtu(dst, sk, NULL, pmtu);

		dst = sctp_transport_dst_check(t);
		if (!dst)
			t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
	}
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
		return;
	}
	if (transport->dst) {
		transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));

		/* Initialize sk->sk_rcv_saddr, if the transport is the
		 * association's active path for getsockname().
		 */
		if (asoc && (!asoc->peer.primary_path ||
			     (transport == asoc->peer.active_path)))
			opt->pf->to_sk_saddr(&transport->saddr,
					     asoc->base.sk);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* Hold a reference to a transport.  */
int sctp_transport_hold(struct sctp_transport *transport)
{
	return atomic_add_unless(&transport->refcnt, 1, 0);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (atomic_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set.  */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = sock_net(tp->asoc->base.sk);
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note: The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
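		/* Illustrative worked example (values assumed, shifts taken as
		 * the commonly used defaults rto_alpha = 3 and rto_beta = 2,
		 * i.e. 1/8 and 1/4): with srtt = 800, rttvar = 100 and a new
		 * measurement rtt = 600, the updates below give
		 *   rttvar = 100 - (100 >> 2) + (|800 - 600| >> 2) = 125
		 *   srtt   = 800 - (800 >> 3) + (600 >> 3)         = 775
		 */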
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

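	/* Continuing the assumed example above: rto = 775 + (125 << 2) = 1275,
	 * before the rto_min/rto_max clamping below.
	 */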
	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	/* The appropriate cwnd increase algorithm is performed if, and only
	 * if, the cumulative TSN would advance and the congestion window is
	 * being fully utilized.
	 */
	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
	    (flight_size < cwnd))
		return;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
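		/* Illustrative (values assumed): with pmtu = 1500, a SACK
		 * acknowledging 4380 outstanding bytes grows cwnd by only
		 * 1500, while one acknowledging 1000 bytes grows it by 1000.
		 */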
		if (asoc->fast_recovery)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
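		/* Illustrative (values assumed): cwnd = 6000, pmtu = 1500,
		 * pba = 5500, bytes_acked = 1000.  pba becomes 6500 >= cwnd,
		 * so cwnd grows to 7500 and pba is reset to 0 because the new
		 * cwnd already exceeds it.
		 */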
		pba += bytes_acked;
		if (pba >= cwnd) {
			cwnd += pmtu;
			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       sctp_lower_cwnd_t reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *     ssthresh = max(cwnd/2, 4*MTU)
		 *     cwnd = 1*MTU
		 *     partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;
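		/* E.g. (assumed values): cwnd = 10000 and pathmtu = 1500 give
		 * ssthresh = max(5000, 6000) = 6000 and cwnd = 1500.
		 */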

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *     ssthresh = max(cwnd/2, 4*MTU)
		 *     cwnd = ssthresh
		 *     partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver.  The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP.  That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
				      4*asoc->pathmtu);
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */
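/* Illustrative (values assumed): with max_burst = 4, pathmtu = 1500 and
 * flight_size = 3000, cwnd is clamped to 3000 + 4 * 1500 = 9000 if it was
 * larger; the previous cwnd is remembered in burst_limited so that
 * sctp_transport_burst_reset() can restore it.
 */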

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited || asoc->max_burst == 0)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}

/* Restore the old cwnd congestion window, after the burst had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
	/* RTO + timer slack +/- 50% of RTO */
	unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
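	/* i.e. a value uniformly distributed in [rto/2, rto*3/2); e.g. for
	 * rto = 3000 jiffies the slack-adjusted timeout lies between 1500
	 * and 4499 (illustrative values).
	 */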

	if (trans->state != SCTP_UNCONFIRMED &&
	    trans->state != SCTP_PF)
		timeout += trans->hbinterval;

	return timeout + jiffies;
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
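	/* E.g. (assumed values): for pathmtu = 1500 this is
	 * min(6000, max(3000, 4380)) = 4380 bytes, which matches the
	 * initial cwnd rule of RFC 4960, Section 7.2.1.
	 */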
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}