/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			/* fall through */
		default:
			/*
			 * Only data packets should come through with skb->sk
			 * set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);
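
		/*
		 * Illustrative note on the header arithmetic above, assuming
		 * no options (dccpd_opt_len == 0): a DATAACK carries the
		 * 12-byte generic header, the 4-byte extended-sequence-number
		 * word and 8 bytes of ack fields, so dccp_header_size = 24
		 * and dccph_doff = 24 / 4 = 6 32-bit words. A plain DATA
		 * packet has no ack fields, giving dccph_doff = 16 / 4 = 4.
		 */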

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
		       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4
	 */

	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
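
	/*
	 * Rough worked example (assuming IPv4 with no IP options and no
	 * extension headers, i.e. net_header_len = 20, icsk_ext_hdr_len = 0):
	 * a PMTU of 1500 gives 1500 - 20 - 12 - 4 = 1464 bytes, and the
	 * option estimate above removes ((42 / 4) * 4) = 40 more, leaving an
	 * MSS of 1424 bytes for application data.
	 */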

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, 2, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   packet we are trying to send
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err)
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}
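
/*
 * Informal summary of the CCID tx convention used by dccp_wait_for_ccid()
 * above and dccp_write_xmit() below: ccid_hc_tx_send_packet() returns 0 when
 * the packet may be sent immediately, a positive delay in milliseconds when
 * the sender should back off and retry, and a negative errno on error.
 */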

static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}
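
/*
 * dccp_write_xmit drains the socket write queue, asking the CCID before each
 * packet whether it may be sent. Roughly: when `block' is non-zero (process
 * context, e.g. the final CLOSE), it sleeps in dccp_wait_for_ccid() until the
 * CCID allows the send; when `block' is zero (timer/softirq context), it
 * instead re-arms dccps_xmit_timer for the delay the CCID asked for and
 * returns.
 */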
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long timeo = DCCP_XMIT_TIMEO;	/* If a packet is taking longer than
					   this we have other issues */

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
					       msecs_to_jiffies(err) + jiffies);
				break;
			} else {
				err = dccp_wait_for_ccid(sk, skb, &timeo);
				timeo = DCCP_XMIT_TIMEO;
			}
			if (err)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5. Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						inet_csk(sk)->icsk_rto,
						DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else
			kfree_skb(skb);
	}
}

int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */
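
	/*
	 * If the skb's data area is already shared with a clone we make a
	 * private copy (pskb_copy) so the headers can be rewritten safely;
	 * otherwise a cheap clone of the queued skb is enough.
	 */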
	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC) :
				      skb_clone(skb, GFP_ATOMIC)));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
					const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_sk_rebuild_header(sk);

	if (err == 0) {
		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
						      code);
		if (skb != NULL) {
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
			return net_xmit_eval(err);
		}
	}

	return err;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/*
	 * SWL and AWL are initially adjusted so that they are not less than
	 * the initial Sequence Numbers received and sent, respectively:
	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
	 *	AWL := max(GSS - W' + 1, ISS).
	 * These adjustments MUST be applied only at the beginning of the
	 * connection.
	 */
	dccp_update_gss(sk, dp->dccps_iss);
	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
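
	/*
	 * For instance, with W' = 100 and ISS = 5000, GSS starts at ISS, so
	 * AWL = max(5000 - 100 + 1, 5000) = 5000; nothing below the initial
	 * sequence number is ever treated as a valid acknowledgement (cf. the
	 * SWL/AWL rule quoted above).
	 */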

	/* S.GAR - greatest valid acknowledgement number received on a non-Sync;
	 *         initialized to S.ISS (sec. 8.5) */
	dp->dccps_gar = dp->dccps_iss;

	icsk->icsk_retransmits = 0;
	init_timer(&dp->dccps_xmit_timer);
	dp->dccps_xmit_timer.data = (unsigned long)sk;
	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
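
	/*
	 * The original REQUEST skb stays queued as sk_send_head so that it
	 * can be retransmitted by the timer armed below; what actually goes
	 * out here is a clone of it.
	 */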
	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq = seq;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
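
	/*
	 * Per RFC 4340, the client ends the connection with a Close, while
	 * the server may instead send a CloseReq, asking the client to close
	 * and thereby to hold the TIMEWAIT state.
	 */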

	if (active) {
		dccp_write_xmit(sk, 1);
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/* FIXME do we need a retransmit timer here? */
	} else
		dccp_transmit_skb(sk, skb);
}