/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

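/*
 * Any outgoing packet that carries an acknowledgement also does the work
 * of a delayed ACK, so once one has been sent the delayed-ACK timer can
 * be stopped.
 */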
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

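/*
 * Park @skb as the packet to retransmit (sk_send_head) and charge it
 * against this socket's write buffer via skb_set_owner_w().
 */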
static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

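		/*
		 * Pick the acknowledgement number: DATA and REQUEST packets
		 * carry no ACK at all, SYNC/SYNCACK acknowledge the packet
		 * that triggered them (dccpd_seq), and everything else
		 * acknowledges GSR.
		 */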
		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			/* fall through */
		default:
			/*
			 * Only data packets should come through with skb->sk
			 * set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

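/*
 * The synced MSS is the path MTU minus the network header, minus the
 * 16 bytes of generic DCCP header (the 12-byte dccp_hdr plus the 4-byte
 * dccp_hdr_ext used with 48-bit sequence numbers), minus extension
 * headers and the rough option estimate below. E.g. for plain IPv4 over
 * a 1500-byte PMTU this yields 1500 - 20 - 16 - 40 = 1424 bytes.
 */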
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
		       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4 (5+6+10+6+6+6+3 = 42 bytes, which the
	 * integer division below rounds down to 40).
	 */
	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

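/*
 * Intended as the socket's sk_write_space callback: wakes tasks sleeping
 * on the socket (e.g. in dccp_wait_for_ccid()) and notifies async waiters
 * once the socket is writeable again; the literal 2 passed to
 * sk_wake_async() selects write-space wakeups (named SOCK_WAKE_SPACE in
 * later kernels).
 */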
void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, 2, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   current skb to pass on for sending
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err)
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					    skb->len);
		if (rc <= 0)	/* 0: send now, < 0: error */
			break;
		delay = msecs_to_jiffies(rc);	/* > 0: delay in msecs */
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}

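/*
 * Timer tail of the CCID transmit delay: when it fires, try to flush the
 * write queue; if the socket is locked by user context, back off by one
 * jiffy and retry.
 */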
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}

void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long timeo = DCCP_XMIT_TIMEO;	/* If a packet is taking longer than
					   this we have other issues */

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
						 skb->len);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err) + jiffies);
				break;
			} else {
				err = dccp_wait_for_ccid(sk, skb, &timeo);
				timeo = DCCP_XMIT_TIMEO;
			}
			if (err)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5.  Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else
			kfree_skb(skb);
	}
}

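/*
 * Retransmit a copy of the packet: if the skb is still cloned (i.e. its
 * data is shared with an earlier transmission), pskb_copy() produces a
 * private copy; otherwise a clone is enough.
 */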
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC) :
				      skb_clone(skb, GFP_ATOMIC)));
}

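/*
 * Build a RESPONSE packet for @req on behalf of the listening socket;
 * the skb is handed back to the AF-specific caller for transmission.
 */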
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				       const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_sk_rebuild_header(sk);

	if (err == 0) {
		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
						      code);
		if (skb != NULL) {
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
			return net_xmit_eval(err);
		}
	}

	return err;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/*
	 * SWL and AWL are initially adjusted so that they are not less than
	 * the initial Sequence Numbers received and sent, respectively:
	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
	 *	AWL := max(GSS - W' + 1, ISS).
	 * These adjustments MUST be applied only at the beginning of the
	 * connection.
	 */
	dccp_update_gss(sk, dp->dccps_iss);
	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));

	/* S.GAR - greatest valid acknowledgement number received on a non-Sync;
	 * initialized to S.ISS (sec. 8.5) */
	dp->dccps_gar = dp->dccps_iss;

	icsk->icsk_retransmits = 0;
	init_timer(&dp->dccps_xmit_timer);
	dp->dccps_xmit_timer.data = (unsigned long)sk;
	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq  = seq;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	if (active) {
		dccp_write_xmit(sk, 1);
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/* FIXME do we need a retransmit timer here? */
	} else
		dccp_transmit_skb(sk, skb);
}