deliverable/linux.git: net/netfilter/nf_conntrack_proto_tcp.c
1 /* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
3 * (C) 2002-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <linux/types.h>
12 #include <linux/timer.h>
13 #include <linux/module.h>
14 #include <linux/in.h>
15 #include <linux/tcp.h>
16 #include <linux/spinlock.h>
17 #include <linux/skbuff.h>
18 #include <linux/ipv6.h>
19 #include <net/ip6_checksum.h>
20 #include <asm/unaligned.h>
21
22 #include <net/tcp.h>
23
24 #include <linux/netfilter.h>
25 #include <linux/netfilter_ipv4.h>
26 #include <linux/netfilter_ipv6.h>
27 #include <net/netfilter/nf_conntrack.h>
28 #include <net/netfilter/nf_conntrack_l4proto.h>
29 #include <net/netfilter/nf_conntrack_ecache.h>
30 #include <net/netfilter/nf_conntrack_seqadj.h>
31 #include <net/netfilter/nf_conntrack_synproxy.h>
32 #include <net/netfilter/nf_log.h>
33 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
34 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
35
36 /* "Be conservative in what you do,
37 be liberal in what you accept from others."
38 If it's non-zero, we mark only out of window RST segments as INVALID. */
39 static int nf_ct_tcp_be_liberal __read_mostly = 0;
40
41 /* If it is set to zero, we disable picking up already established
42 connections. */
43 static int nf_ct_tcp_loose __read_mostly = 1;
44
45 /* Max number of the retransmitted packets without receiving an (acceptable)
46 ACK from the destination. If this number is reached, a shorter timer
47 will be started. */
48 static int nf_ct_tcp_max_retrans __read_mostly = 3;
49
50 /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
51 closely. They're more complex. --RR */
52
53 static const char *const tcp_conntrack_names[] = {
54 "NONE",
55 "SYN_SENT",
56 "SYN_RECV",
57 "ESTABLISHED",
58 "FIN_WAIT",
59 "CLOSE_WAIT",
60 "LAST_ACK",
61 "TIME_WAIT",
62 "CLOSE",
63 "SYN_SENT2",
64 };
65
66 #define SECS * HZ
67 #define MINS * 60 SECS
68 #define HOURS * 60 MINS
69 #define DAYS * 24 HOURS
70
71 static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
72 [TCP_CONNTRACK_SYN_SENT] = 2 MINS,
73 [TCP_CONNTRACK_SYN_RECV] = 60 SECS,
74 [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
75 [TCP_CONNTRACK_FIN_WAIT] = 2 MINS,
76 [TCP_CONNTRACK_CLOSE_WAIT] = 60 SECS,
77 [TCP_CONNTRACK_LAST_ACK] = 30 SECS,
78 [TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
79 [TCP_CONNTRACK_CLOSE] = 10 SECS,
80 [TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
81 /* RFC1122 says the R2 limit should be at least 100 seconds.
82 Linux uses a limit of 15 packets, which corresponds
83 to ~13-30 min depending on the RTO. */
84 [TCP_CONNTRACK_RETRANS] = 5 MINS,
85 [TCP_CONNTRACK_UNACK] = 5 MINS,
86 };
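
/* Editorial note (not part of the original file): the unit macros above are
 * plain multiplications into jiffies, e.g. the ESTABLISHED entry expands as
 *
 *	5 DAYS == 5 * 24 * 60 * 60 * HZ
 *
 * i.e. 432000 seconds worth of jiffies, which is why this table can be handed
 * straight to the conntrack timer code without further conversion.
 */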
87
88 #define sNO TCP_CONNTRACK_NONE
89 #define sSS TCP_CONNTRACK_SYN_SENT
90 #define sSR TCP_CONNTRACK_SYN_RECV
91 #define sES TCP_CONNTRACK_ESTABLISHED
92 #define sFW TCP_CONNTRACK_FIN_WAIT
93 #define sCW TCP_CONNTRACK_CLOSE_WAIT
94 #define sLA TCP_CONNTRACK_LAST_ACK
95 #define sTW TCP_CONNTRACK_TIME_WAIT
96 #define sCL TCP_CONNTRACK_CLOSE
97 #define sS2 TCP_CONNTRACK_SYN_SENT2
98 #define sIV TCP_CONNTRACK_MAX
99 #define sIG TCP_CONNTRACK_IGNORE
100
101 /* What TCP flags are set from RST/SYN/FIN/ACK. */
102 enum tcp_bit_set {
103 TCP_SYN_SET,
104 TCP_SYNACK_SET,
105 TCP_FIN_SET,
106 TCP_ACK_SET,
107 TCP_RST_SET,
108 TCP_NONE_SET,
109 };
110
111 /*
112 * The TCP state transition table needs a few words...
113 *
114 * We are the man in the middle. All the packets go through us
115 * but might get lost in transit to the destination.
116 * It is assumed that the destinations can't receive segments
117 * we haven't seen.
118 *
119 * The checked segment is in window, but our windows are *not*
120 * equivalent with the ones of the sender/receiver. We always
121 * try to guess the state of the current sender.
122 *
123 * The meaning of the states are:
124 *
125 * NONE: initial state
126 * SYN_SENT: SYN-only packet seen
127 * SYN_SENT2: SYN-only packet seen from reply dir, simultaneous open
128 * SYN_RECV: SYN-ACK packet seen
129 * ESTABLISHED: ACK packet seen
130 * FIN_WAIT: FIN packet seen
131 * CLOSE_WAIT: ACK seen (after FIN)
132 * LAST_ACK: FIN seen (after FIN)
133 * TIME_WAIT: last ACK seen
134 * CLOSE: closed connection (RST)
135 *
136 * Packets marked as IGNORED (sIG):
137 * if they may be either invalid or valid
138 * and the receiver may send back a connection
139 * closing RST or a SYN/ACK.
140 *
141 * Packets marked as INVALID (sIV):
142 * if we regard them as truly invalid packets
143 */
144 static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
145 {
146 /* ORIGINAL */
147 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
148 /*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
149 /*
150 * sNO -> sSS Initialize a new connection
151 * sSS -> sSS Retransmitted SYN
152 * sS2 -> sS2 Late retransmitted SYN
153 * sSR -> sIG
154 * sES -> sIG Error: SYNs in window outside the SYN_SENT state
155 * are errors. Receiver will reply with RST
156 * and close the connection.
157 * Or we are not in sync and hold a dead connection.
158 * sFW -> sIG
159 * sCW -> sIG
160 * sLA -> sIG
161 * sTW -> sSS Reopened connection (RFC 1122).
162 * sCL -> sSS
163 */
164 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
165 /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
166 /*
167 * sNO -> sIV Too late and no reason to do anything
168 * sSS -> sIV Client can't send SYN and then SYN/ACK
169 * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
170 * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
171 * sES -> sIV Invalid SYN/ACK packets sent by the client
172 * sFW -> sIV
173 * sCW -> sIV
174 * sLA -> sIV
175 * sTW -> sIV
176 * sCL -> sIV
177 */
178 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
179 /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
180 /*
181 * sNO -> sIV Too late and no reason to do anything...
182 * sSS -> sIV Client might not send FIN in this state:
183 * we enforce waiting for a SYN/ACK reply first.
184 * sS2 -> sIV
185 * sSR -> sFW Close started.
186 * sES -> sFW
187 * sFW -> sLA FIN seen in both directions, waiting for
188 * the last ACK.
189 * Might be a retransmitted FIN as well...
190 * sCW -> sLA
191 * sLA -> sLA Retransmitted FIN. Remain in the same state.
192 * sTW -> sTW
193 * sCL -> sCL
194 */
195 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
196 /*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
197 /*
198 * sNO -> sES Assumed.
199 * sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet.
200 * sS2 -> sIV
201 * sSR -> sES Established state is reached.
202 * sES -> sES :-)
203 * sFW -> sCW Normal close request answered by ACK.
204 * sCW -> sCW
205 * sLA -> sTW Last ACK detected (RFC5961 challenged)
206 * sTW -> sTW Retransmitted last ACK. Remain in the same state.
207 * sCL -> sCL
208 */
209 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
210 /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
211 /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
212 },
213 {
214 /* REPLY */
215 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
216 /*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
217 /*
218 * sNO -> sIV Never reached.
219 * sSS -> sS2 Simultaneous open
220 * sS2 -> sS2 Retransmitted simultaneous SYN
221 * sSR -> sIV Invalid SYN packets sent by the server
222 * sES -> sIV
223 * sFW -> sIV
224 * sCW -> sIV
225 * sLA -> sIV
226 * sTW -> sSS Reopened connection, but server may have switched role
227 * sCL -> sIV
228 */
229 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
230 /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
231 /*
232 * sSS -> sSR Standard open.
233 * sS2 -> sSR Simultaneous open
234 * sSR -> sIG Retransmitted SYN/ACK, ignore it.
235 * sES -> sIG Late retransmitted SYN/ACK?
236 * sFW -> sIG Might be SYN/ACK answering ignored SYN
237 * sCW -> sIG
238 * sLA -> sIG
239 * sTW -> sIG
240 * sCL -> sIG
241 */
242 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
243 /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
244 /*
245 * sSS -> sIV Server might not send FIN in this state.
246 * sS2 -> sIV
247 * sSR -> sFW Close started.
248 * sES -> sFW
249 * sFW -> sLA FIN seen in both directions.
250 * sCW -> sLA
251 * sLA -> sLA Retransmitted FIN.
252 * sTW -> sTW
253 * sCL -> sCL
254 */
255 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
256 /*ack*/ { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
257 /*
258 * sSS -> sIG Might be a half-open connection.
259 * sS2 -> sIG
260 * sSR -> sSR Might answer late resent SYN.
261 * sES -> sES :-)
262 * sFW -> sCW Normal close request answered by ACK.
263 * sCW -> sCW
264 * sLA -> sTW Last ACK detected (RFC5961 challenged)
265 * sTW -> sTW Retransmitted last ACK.
266 * sCL -> sCL
267 */
268 /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
269 /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
270 /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
271 }
272 };
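
/* Illustrative sketch (added for clarity, not in the original source): the
 * table above is consulted in tcp_packet() as a plain three-level lookup,
 * tcp_conntracks[dir][flag-class][old_state].  For example, a client SYN
 * arriving on a fresh entry resolves to SYN_SENT:
 *
 *	u8 next;
 *
 *	next = tcp_conntracks[IP_CT_DIR_ORIGINAL][TCP_SYN_SET][TCP_CONNTRACK_NONE];
 *	// next == TCP_CONNTRACK_SYN_SENT (sSS in the table)
 */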
273
274 static inline struct nf_tcp_net *tcp_pernet(struct net *net)
275 {
276 return &net->ct.nf_ct_proto.tcp;
277 }
278
279 static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
280 struct net *net, struct nf_conntrack_tuple *tuple)
281 {
282 const struct tcphdr *hp;
283 struct tcphdr _hdr;
284
285 /* Actually only need first 8 bytes. */
286 hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
287 if (hp == NULL)
288 return false;
289
290 tuple->src.u.tcp.port = hp->source;
291 tuple->dst.u.tcp.port = hp->dest;
292
293 return true;
294 }
295
296 static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
297 const struct nf_conntrack_tuple *orig)
298 {
299 tuple->src.u.tcp.port = orig->dst.u.tcp.port;
300 tuple->dst.u.tcp.port = orig->src.u.tcp.port;
301 return true;
302 }
303
304 /* Print out the per-protocol part of the tuple. */
305 static void tcp_print_tuple(struct seq_file *s,
306 const struct nf_conntrack_tuple *tuple)
307 {
308 seq_printf(s, "sport=%hu dport=%hu ",
309 ntohs(tuple->src.u.tcp.port),
310 ntohs(tuple->dst.u.tcp.port));
311 }
312
313 /* Print out the private part of the conntrack. */
314 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
315 {
316 seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
317 }
318
319 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
320 {
321 if (tcph->rst) return TCP_RST_SET;
322 else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
323 else if (tcph->fin) return TCP_FIN_SET;
324 else if (tcph->ack) return TCP_ACK_SET;
325 else return TCP_NONE_SET;
326 }
327
328 /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
329 in IP Filter' by Guido van Rooij.
330
331 http://www.sane.nl/events/sane2000/papers.html
332 http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
333
334 The boundaries and the conditions are changed according to RFC793:
335 the packet must intersect the window (i.e. segments may be
336 after the right or before the left edge) and thus receivers may ACK
337 segments after the right edge of the window.
338
339 td_maxend = max(sack + max(win,1)) seen in reply packets
340 td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
341 td_maxwin += seq + len - sender.td_maxend
342 if seq + len > sender.td_maxend
343 td_end = max(seq + len) seen in sent packets
344
345 I. Upper bound for valid data: seq <= sender.td_maxend
346 II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
347 III. Upper bound for valid (s)ack: sack <= receiver.td_end
348 IV. Lower bound for valid (s)ack: sack >= receiver.td_end - MAXACKWINDOW
349
350 where sack is the highest right edge of sack block found in the packet
351 or ack in the case of packet without SACK option.
352
353 The upper bound limit for a valid (s)ack is not ignored -
354 we don't have to deal with fragments.
355 */
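
/* Worked example (illustrative only, numbers invented): with
 *	sender:   td_end = 1500, td_maxend = 2000
 *	receiver: td_end =  800, td_maxwin = 1000
 * a segment carrying seq = 1400, len = 200 and ack/sack = 700 satisfies all
 * four bounds checked later in tcp_in_window():
 *	I.   seq (1400)       <= sender.td_maxend (2000)
 *	II.  seq+len (1600)   >= sender.td_end - receiver.td_maxwin (500)
 *	III. sack (700)       <= receiver.td_end (800)
 *	IV.  sack (700)       >= receiver.td_end - MAXACKWINDOW(sender)
 * The real comparisons are done with the sequence-space helpers
 * before()/after(), which makes them wrap-safe.
 */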
356
357 static inline __u32 segment_seq_plus_len(__u32 seq,
358 size_t len,
359 unsigned int dataoff,
360 const struct tcphdr *tcph)
361 {
362 /* XXX Should I use payload length field in IP/IPv6 header ?
363 * - YK */
364 return (seq + len - dataoff - tcph->doff*4
365 + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
366 }
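
/* Example (added for illustration): skb->len - dataoff - tcph->doff*4 is the
 * TCP payload length, so a bare SYN carrying no data evaluates to seq + 1
 * (the SYN consumes one unit of sequence space), and a 100-byte data segment
 * with FIN set evaluates to seq + 101.
 */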
367
368 /* Fixme: what about big packets? */
369 #define MAXACKWINCONST 66000
370 #define MAXACKWINDOW(sender) \
371 ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
372 : MAXACKWINCONST)
373
374 /*
375 * Simplified tcp_parse_options routine from tcp_input.c
376 */
377 static void tcp_options(const struct sk_buff *skb,
378 unsigned int dataoff,
379 const struct tcphdr *tcph,
380 struct ip_ct_tcp_state *state)
381 {
382 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
383 const unsigned char *ptr;
384 int length = (tcph->doff*4) - sizeof(struct tcphdr);
385
386 if (!length)
387 return;
388
389 ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
390 length, buff);
391 BUG_ON(ptr == NULL);
392
393 state->td_scale =
394 state->flags = 0;
395
396 while (length > 0) {
397 int opcode=*ptr++;
398 int opsize;
399
400 switch (opcode) {
401 case TCPOPT_EOL:
402 return;
403 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
404 length--;
405 continue;
406 default:
407 if (length < 2)
408 return;
409 opsize=*ptr++;
410 if (opsize < 2) /* "silly options" */
411 return;
412 if (opsize > length)
413 return; /* don't parse partial options */
414
415 if (opcode == TCPOPT_SACK_PERM
416 && opsize == TCPOLEN_SACK_PERM)
417 state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
418 else if (opcode == TCPOPT_WINDOW
419 && opsize == TCPOLEN_WINDOW) {
420 state->td_scale = *(u_int8_t *)ptr;
421
422 if (state->td_scale > 14) {
423 /* See RFC1323 */
424 state->td_scale = 14;
425 }
426 state->flags |=
427 IP_CT_TCP_FLAG_WINDOW_SCALE;
428 }
429 ptr += opsize - 2;
430 length -= opsize;
431 }
432 }
433 }
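
/* Example (illustrative, not part of the original file): a SYN whose option
 * block carries "window scale = 7" and "SACK permitted" leaves the state
 * with
 *	state->td_scale == 7
 *	state->flags    == IP_CT_TCP_FLAG_WINDOW_SCALE | IP_CT_TCP_FLAG_SACK_PERM
 * A scale factor larger than 14 is clamped to 14, as required by RFC 1323.
 */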
434
435 static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
436 const struct tcphdr *tcph, __u32 *sack)
437 {
438 unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
439 const unsigned char *ptr;
440 int length = (tcph->doff*4) - sizeof(struct tcphdr);
441 __u32 tmp;
442
443 if (!length)
444 return;
445
446 ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
447 length, buff);
448 BUG_ON(ptr == NULL);
449
450 /* Fast path for timestamp-only option */
451 if (length == TCPOLEN_TSTAMP_ALIGNED
452 && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
453 | (TCPOPT_NOP << 16)
454 | (TCPOPT_TIMESTAMP << 8)
455 | TCPOLEN_TIMESTAMP))
456 return;
457
458 while (length > 0) {
459 int opcode = *ptr++;
460 int opsize, i;
461
462 switch (opcode) {
463 case TCPOPT_EOL:
464 return;
465 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
466 length--;
467 continue;
468 default:
469 if (length < 2)
470 return;
471 opsize = *ptr++;
472 if (opsize < 2) /* "silly options" */
473 return;
474 if (opsize > length)
475 return; /* don't parse partial options */
476
477 if (opcode == TCPOPT_SACK
478 && opsize >= (TCPOLEN_SACK_BASE
479 + TCPOLEN_SACK_PERBLOCK)
480 && !((opsize - TCPOLEN_SACK_BASE)
481 % TCPOLEN_SACK_PERBLOCK)) {
482 for (i = 0;
483 i < (opsize - TCPOLEN_SACK_BASE);
484 i += TCPOLEN_SACK_PERBLOCK) {
485 tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
486
487 if (after(tmp, *sack))
488 *sack = tmp;
489 }
490 return;
491 }
492 ptr += opsize - 2;
493 length -= opsize;
494 }
495 }
496 }
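
/* Example (illustrative): given a SACK option carrying the blocks
 * [4000,5000] and [7000,8000], the loop above inspects the right edge of
 * each block and leaves *sack at 8000, provided that value lies after the
 * plain acknowledgment number *sack was initialized with by the caller.
 */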
497
498 static bool tcp_in_window(const struct nf_conn *ct,
499 struct ip_ct_tcp *state,
500 enum ip_conntrack_dir dir,
501 unsigned int index,
502 const struct sk_buff *skb,
503 unsigned int dataoff,
504 const struct tcphdr *tcph,
505 u_int8_t pf)
506 {
507 struct net *net = nf_ct_net(ct);
508 struct nf_tcp_net *tn = tcp_pernet(net);
509 struct ip_ct_tcp_state *sender = &state->seen[dir];
510 struct ip_ct_tcp_state *receiver = &state->seen[!dir];
511 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
512 __u32 seq, ack, sack, end, win, swin;
513 s32 receiver_offset;
514 bool res, in_recv_win;
515
516 /*
517 * Get the required data from the packet.
518 */
519 seq = ntohl(tcph->seq);
520 ack = sack = ntohl(tcph->ack_seq);
521 win = ntohs(tcph->window);
522 end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
523
524 if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
525 tcp_sack(skb, dataoff, tcph, &sack);
526
527 /* Take into account NAT sequence number mangling */
528 receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
529 ack -= receiver_offset;
530 sack -= receiver_offset;
531
532 pr_debug("tcp_in_window: START\n");
533 pr_debug("tcp_in_window: ");
534 nf_ct_dump_tuple(tuple);
535 pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
536 seq, ack, receiver_offset, sack, receiver_offset, win, end);
537 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
538 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
539 sender->td_end, sender->td_maxend, sender->td_maxwin,
540 sender->td_scale,
541 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
542 receiver->td_scale);
543
544 if (sender->td_maxwin == 0) {
545 /*
546 * Initialize sender data.
547 */
548 if (tcph->syn) {
549 /*
550 * SYN-ACK in reply to a SYN
551 * or SYN from reply direction in simultaneous open.
552 */
553 sender->td_end =
554 sender->td_maxend = end;
555 sender->td_maxwin = (win == 0 ? 1 : win);
556
557 tcp_options(skb, dataoff, tcph, sender);
558 /*
559 * RFC 1323:
560 * Both sides must send the Window Scale option
561 * to enable window scaling in either direction.
562 */
563 if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
564 && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
565 sender->td_scale =
566 receiver->td_scale = 0;
567 if (!tcph->ack)
568 /* Simultaneous open */
569 return true;
570 } else {
571 /*
572 * We are in the middle of a connection,
573 * its history is lost for us.
574 * Let's try to use the data from the packet.
575 */
576 sender->td_end = end;
577 swin = win << sender->td_scale;
578 sender->td_maxwin = (swin == 0 ? 1 : swin);
579 sender->td_maxend = end + sender->td_maxwin;
580 /*
581 * We haven't seen traffic in the other direction yet
582 * but we have to tweak window tracking to pass III
583 * and IV until that happens.
584 */
585 if (receiver->td_maxwin == 0)
586 receiver->td_end = receiver->td_maxend = sack;
587 }
588 } else if (((state->state == TCP_CONNTRACK_SYN_SENT
589 && dir == IP_CT_DIR_ORIGINAL)
590 || (state->state == TCP_CONNTRACK_SYN_RECV
591 && dir == IP_CT_DIR_REPLY))
592 && after(end, sender->td_end)) {
593 /*
594 * RFC 793: "if a TCP is reinitialized ... then it need
595 * not wait at all; it must only be sure to use sequence
596 * numbers larger than those recently used."
597 */
598 sender->td_end =
599 sender->td_maxend = end;
600 sender->td_maxwin = (win == 0 ? 1 : win);
601
602 tcp_options(skb, dataoff, tcph, sender);
603 }
604
605 if (!(tcph->ack)) {
606 /*
607 * If there is no ACK, just pretend it was set and OK.
608 */
609 ack = sack = receiver->td_end;
610 } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
611 (TCP_FLAG_ACK|TCP_FLAG_RST))
612 && (ack == 0)) {
613 /*
614 * Broken TCP stacks that set the ACK flag in RST packets
615 * but carry a zero ack value.
616 */
617 ack = sack = receiver->td_end;
618 }
619
620 if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
621 /*
622 * RST sent answering SYN.
623 */
624 seq = end = sender->td_end;
625
626 pr_debug("tcp_in_window: ");
627 nf_ct_dump_tuple(tuple);
628 pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
629 seq, ack, receiver_offset, sack, receiver_offset, win, end);
630 pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
631 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
632 sender->td_end, sender->td_maxend, sender->td_maxwin,
633 sender->td_scale,
634 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
635 receiver->td_scale);
636
637 /* Is the ending sequence in the receive window (if available)? */
638 in_recv_win = !receiver->td_maxwin ||
639 after(end, sender->td_end - receiver->td_maxwin - 1);
640
641 pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
642 before(seq, sender->td_maxend + 1),
643 (in_recv_win ? 1 : 0),
644 before(sack, receiver->td_end + 1),
645 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
646
647 if (before(seq, sender->td_maxend + 1) &&
648 in_recv_win &&
649 before(sack, receiver->td_end + 1) &&
650 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
651 /*
652 * Take into account window scaling (RFC 1323).
653 */
654 if (!tcph->syn)
655 win <<= sender->td_scale;
656
657 /*
658 * Update sender data.
659 */
660 swin = win + (sack - ack);
661 if (sender->td_maxwin < swin)
662 sender->td_maxwin = swin;
663 if (after(end, sender->td_end)) {
664 sender->td_end = end;
665 sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
666 }
667 if (tcph->ack) {
668 if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
669 sender->td_maxack = ack;
670 sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
671 } else if (after(ack, sender->td_maxack))
672 sender->td_maxack = ack;
673 }
674
675 /*
676 * Update receiver data.
677 */
678 if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
679 receiver->td_maxwin += end - sender->td_maxend;
680 if (after(sack + win, receiver->td_maxend - 1)) {
681 receiver->td_maxend = sack + win;
682 if (win == 0)
683 receiver->td_maxend++;
684 }
685 if (ack == receiver->td_end)
686 receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
687
688 /*
689 * Check retransmissions.
690 */
691 if (index == TCP_ACK_SET) {
692 if (state->last_dir == dir
693 && state->last_seq == seq
694 && state->last_ack == ack
695 && state->last_end == end
696 && state->last_win == win)
697 state->retrans++;
698 else {
699 state->last_dir = dir;
700 state->last_seq = seq;
701 state->last_ack = ack;
702 state->last_end = end;
703 state->last_win = win;
704 state->retrans = 0;
705 }
706 }
707 res = true;
708 } else {
709 res = false;
710 if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
711 tn->tcp_be_liberal)
712 res = true;
713 if (!res && LOG_INVALID(net, IPPROTO_TCP))
714 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
715 "nf_ct_tcp: %s ",
716 before(seq, sender->td_maxend + 1) ?
717 in_recv_win ?
718 before(sack, receiver->td_end + 1) ?
719 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
720 : "ACK is under the lower bound (possible overly delayed ACK)"
721 : "ACK is over the upper bound (ACKed data not seen yet)"
722 : "SEQ is under the lower bound (already ACKed data retransmitted)"
723 : "SEQ is over the upper bound (over the window of the receiver)");
724 }
725
726 pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
727 "receiver end=%u maxend=%u maxwin=%u\n",
728 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
729 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
730
731 return res;
732 }
733
734 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
735 static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
736 TCPHDR_URG) + 1] =
737 {
738 [TCPHDR_SYN] = 1,
739 [TCPHDR_SYN|TCPHDR_URG] = 1,
740 [TCPHDR_SYN|TCPHDR_ACK] = 1,
741 [TCPHDR_RST] = 1,
742 [TCPHDR_RST|TCPHDR_ACK] = 1,
743 [TCPHDR_FIN|TCPHDR_ACK] = 1,
744 [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
745 [TCPHDR_ACK] = 1,
746 [TCPHDR_ACK|TCPHDR_URG] = 1,
747 };
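
/* Example (added for clarity): tcp_error() below masks out PSH, ECE and CWR
 * and then indexes this table with the remaining flag bits.  A SYN|ACK or a
 * bare RST maps to 1 and passes; combinations that never occur in a sane
 * stream, such as SYN|FIN or a segment with no flags at all, hit a zero
 * entry and are rejected as invalid.
 */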
748
749 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
750 static int tcp_error(struct net *net, struct nf_conn *tmpl,
751 struct sk_buff *skb,
752 unsigned int dataoff,
753 enum ip_conntrack_info *ctinfo,
754 u_int8_t pf,
755 unsigned int hooknum)
756 {
757 const struct tcphdr *th;
758 struct tcphdr _tcph;
759 unsigned int tcplen = skb->len - dataoff;
760 u_int8_t tcpflags;
761
762 /* Smaller than the minimal TCP header? */
763 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
764 if (th == NULL) {
765 if (LOG_INVALID(net, IPPROTO_TCP))
766 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
767 "nf_ct_tcp: short packet ");
768 return -NF_ACCEPT;
769 }
770
771 /* Not whole TCP header or malformed packet */
772 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
773 if (LOG_INVALID(net, IPPROTO_TCP))
774 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
775 "nf_ct_tcp: truncated/malformed packet ");
776 return -NF_ACCEPT;
777 }
778
779 /* Checksum invalid? Ignore.
780 * We skip checking packets on the outgoing path
781 * because the checksum is assumed to be correct.
782 */
783 /* FIXME: Source route IP option packets --RR */
784 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
785 nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
786 if (LOG_INVALID(net, IPPROTO_TCP))
787 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
788 "nf_ct_tcp: bad TCP checksum ");
789 return -NF_ACCEPT;
790 }
791
792 /* Check TCP flags. */
793 tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
794 if (!tcp_valid_flags[tcpflags]) {
795 if (LOG_INVALID(net, IPPROTO_TCP))
796 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
797 "nf_ct_tcp: invalid TCP flag combination ");
798 return -NF_ACCEPT;
799 }
800
801 return NF_ACCEPT;
802 }
803
804 static unsigned int *tcp_get_timeouts(struct net *net)
805 {
806 return tcp_pernet(net)->timeouts;
807 }
808
809 /* Returns verdict for packet, or -1 for invalid. */
810 static int tcp_packet(struct nf_conn *ct,
811 const struct sk_buff *skb,
812 unsigned int dataoff,
813 enum ip_conntrack_info ctinfo,
814 u_int8_t pf,
815 unsigned int hooknum,
816 unsigned int *timeouts)
817 {
818 struct net *net = nf_ct_net(ct);
819 struct nf_tcp_net *tn = tcp_pernet(net);
820 struct nf_conntrack_tuple *tuple;
821 enum tcp_conntrack new_state, old_state;
822 enum ip_conntrack_dir dir;
823 const struct tcphdr *th;
824 struct tcphdr _tcph;
825 unsigned long timeout;
826 unsigned int index;
827
828 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
829 BUG_ON(th == NULL);
830
831 spin_lock_bh(&ct->lock);
832 old_state = ct->proto.tcp.state;
833 dir = CTINFO2DIR(ctinfo);
834 index = get_conntrack_index(th);
835 new_state = tcp_conntracks[dir][index][old_state];
836 tuple = &ct->tuplehash[dir].tuple;
837
838 switch (new_state) {
839 case TCP_CONNTRACK_SYN_SENT:
840 if (old_state < TCP_CONNTRACK_TIME_WAIT)
841 break;
842 /* RFC 1122: "When a connection is closed actively,
843 * it MUST linger in TIME-WAIT state for a time 2xMSL
844 * (Maximum Segment Lifetime). However, it MAY accept
845 * a new SYN from the remote TCP to reopen the connection
846 * directly from TIME-WAIT state, if..."
847 * We ignore the conditions because we are in the
848 * TIME-WAIT state anyway.
849 *
850 * Handle aborted connections: we and the server
851 * think there is an existing connection but the client
852 * aborts it and starts a new one.
853 */
854 if (((ct->proto.tcp.seen[dir].flags
855 | ct->proto.tcp.seen[!dir].flags)
856 & IP_CT_TCP_FLAG_CLOSE_INIT)
857 || (ct->proto.tcp.last_dir == dir
858 && ct->proto.tcp.last_index == TCP_RST_SET)) {
859 /* Attempt to reopen a closed/aborted connection.
860 * Delete this connection and look up again. */
861 spin_unlock_bh(&ct->lock);
862
863 /* Only repeat if we can actually remove the timer.
864 * Destruction may already be in progress in process
865 * context and we must give it a chance to terminate.
866 */
867 if (nf_ct_kill(ct))
868 return -NF_REPEAT;
869 return NF_DROP;
870 }
871 /* Fall through */
872 case TCP_CONNTRACK_IGNORE:
873 /* Ignored packets:
874 *
875 * Our connection entry may be out of sync, so ignore
876 * packets which may signal the real connection between
877 * the client and the server.
878 *
879 * a) SYN in ORIGINAL
880 * b) SYN/ACK in REPLY
881 * c) ACK in reply direction after initial SYN in original.
882 *
883 * If the ignored packet is invalid, the receiver will send
884 * a RST we'll catch below.
885 */
886 if (index == TCP_SYNACK_SET
887 && ct->proto.tcp.last_index == TCP_SYN_SET
888 && ct->proto.tcp.last_dir != dir
889 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
890 /* b) This SYN/ACK acknowledges a SYN that we earlier
891 * ignored as invalid. This means that the client and
892 * the server are both in sync, while the firewall is
893 * not. We get in sync from the previously annotated
894 * values.
895 */
896 old_state = TCP_CONNTRACK_SYN_SENT;
897 new_state = TCP_CONNTRACK_SYN_RECV;
898 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
899 ct->proto.tcp.last_end;
900 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
901 ct->proto.tcp.last_end;
902 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
903 ct->proto.tcp.last_win == 0 ?
904 1 : ct->proto.tcp.last_win;
905 ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
906 ct->proto.tcp.last_wscale;
907 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
908 ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
909 ct->proto.tcp.last_flags;
910 memset(&ct->proto.tcp.seen[dir], 0,
911 sizeof(struct ip_ct_tcp_state));
912 break;
913 }
914 ct->proto.tcp.last_index = index;
915 ct->proto.tcp.last_dir = dir;
916 ct->proto.tcp.last_seq = ntohl(th->seq);
917 ct->proto.tcp.last_end =
918 segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
919 ct->proto.tcp.last_win = ntohs(th->window);
920
921 /* a) This is a SYN in ORIGINAL. The client and the server
922 * may be in sync but we are not. In that case, we annotate
923 * the TCP options and let the packet go through. If it is a
924 * valid SYN packet, the server will reply with a SYN/ACK, and
925 * then we'll get in sync. Otherwise, the server potentially
926 * responds with a challenge ACK if implementing RFC5961.
927 */
928 if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
929 struct ip_ct_tcp_state seen = {};
930
931 ct->proto.tcp.last_flags =
932 ct->proto.tcp.last_wscale = 0;
933 tcp_options(skb, dataoff, th, &seen);
934 if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
935 ct->proto.tcp.last_flags |=
936 IP_CT_TCP_FLAG_WINDOW_SCALE;
937 ct->proto.tcp.last_wscale = seen.td_scale;
938 }
939 if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
940 ct->proto.tcp.last_flags |=
941 IP_CT_TCP_FLAG_SACK_PERM;
942 }
943 /* Mark the potential for RFC5961 challenge ACK,
944 * this poses a special problem for the LAST_ACK state
945 * as the ACK is interpreted as ACKing the last FIN.
946 */
947 if (old_state == TCP_CONNTRACK_LAST_ACK)
948 ct->proto.tcp.last_flags |=
949 IP_CT_EXP_CHALLENGE_ACK;
950 }
951 spin_unlock_bh(&ct->lock);
952 if (LOG_INVALID(net, IPPROTO_TCP))
953 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
954 "nf_ct_tcp: invalid packet ignored in "
955 "state %s ", tcp_conntrack_names[old_state]);
956 return NF_ACCEPT;
957 case TCP_CONNTRACK_MAX:
958 /* Special case for SYN proxy: when the SYN to the server or
959 * the SYN/ACK from the server is lost, the client may transmit
960 * a keep-alive packet while in SYN_SENT state. This needs to
961 * be associated with the original conntrack entry in order to
962 * generate a new SYN with the correct sequence number.
963 */
964 if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
965 index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
966 ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
967 ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
968 pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
969 spin_unlock_bh(&ct->lock);
970 return NF_ACCEPT;
971 }
972
973 /* Invalid packet */
974 pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
975 dir, get_conntrack_index(th), old_state);
976 spin_unlock_bh(&ct->lock);
977 if (LOG_INVALID(net, IPPROTO_TCP))
978 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
979 "nf_ct_tcp: invalid state ");
980 return -NF_ACCEPT;
981 case TCP_CONNTRACK_TIME_WAIT:
982 /* RFC5961 compliance causes the stack to send a "challenge-ACK",
983 * e.g. in response to spurious SYNs. Conntrack MUST
984 * not believe this ACK is acking the last FIN.
985 */
986 if (old_state == TCP_CONNTRACK_LAST_ACK &&
987 index == TCP_ACK_SET &&
988 ct->proto.tcp.last_dir != dir &&
989 ct->proto.tcp.last_index == TCP_SYN_SET &&
990 (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
991 /* Detected RFC5961 challenge ACK */
992 ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
993 spin_unlock_bh(&ct->lock);
994 if (LOG_INVALID(net, IPPROTO_TCP))
995 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
996 "nf_ct_tcp: challenge-ACK ignored ");
997 return NF_ACCEPT; /* Don't change state */
998 }
999 break;
1000 case TCP_CONNTRACK_CLOSE:
1001 if (index == TCP_RST_SET
1002 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
1003 && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
1004 /* Invalid RST */
1005 spin_unlock_bh(&ct->lock);
1006 if (LOG_INVALID(net, IPPROTO_TCP))
1007 nf_log_packet(net, pf, 0, skb, NULL, NULL,
1008 NULL, "nf_ct_tcp: invalid RST ");
1009 return -NF_ACCEPT;
1010 }
1011 if (index == TCP_RST_SET
1012 && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
1013 && ct->proto.tcp.last_index == TCP_SYN_SET)
1014 || (!test_bit(IPS_ASSURED_BIT, &ct->status)
1015 && ct->proto.tcp.last_index == TCP_ACK_SET))
1016 && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1017 /* RST sent to invalid SYN or ACK we had let through
1018 * at a) and c) above:
1019 *
1020 * a) SYN was in window then
1021 * c) we hold a half-open connection.
1022 *
1023 * Delete our connection entry.
1024 * We skip window checking, because packet might ACK
1025 * segments we ignored. */
1026 goto in_window;
1027 }
1028 /* Just fall through */
1029 default:
1030 /* Keep compilers happy. */
1031 break;
1032 }
1033
1034 if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
1035 skb, dataoff, th, pf)) {
1036 spin_unlock_bh(&ct->lock);
1037 return -NF_ACCEPT;
1038 }
1039 in_window:
1040 /* From now on we have got in-window packets */
1041 ct->proto.tcp.last_index = index;
1042 ct->proto.tcp.last_dir = dir;
1043
1044 pr_debug("tcp_conntracks: ");
1045 nf_ct_dump_tuple(tuple);
1046 pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
1047 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
1048 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
1049 old_state, new_state);
1050
1051 ct->proto.tcp.state = new_state;
1052 if (old_state != new_state
1053 && new_state == TCP_CONNTRACK_FIN_WAIT)
1054 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1055
1056 if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1057 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1058 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1059 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1060 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1061 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1062 timeout = timeouts[TCP_CONNTRACK_UNACK];
1063 else
1064 timeout = timeouts[new_state];
1065 spin_unlock_bh(&ct->lock);
1066
1067 if (new_state != old_state)
1068 nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
1069
1070 if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1071 /* If the only reply is a RST, we can consider ourselves not to
1072 have an established connection: this is a fairly common
1073 problem case, so we can delete the conntrack
1074 immediately. --RR */
1075 if (th->rst) {
1076 nf_ct_kill_acct(ct, ctinfo, skb);
1077 return NF_ACCEPT;
1078 }
1079 /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
1080 * pickup with loose=1. Avoid large ESTABLISHED timeout.
1081 */
1082 if (new_state == TCP_CONNTRACK_ESTABLISHED &&
1083 timeout > timeouts[TCP_CONNTRACK_UNACK])
1084 timeout = timeouts[TCP_CONNTRACK_UNACK];
1085 } else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
1086 && (old_state == TCP_CONNTRACK_SYN_RECV
1087 || old_state == TCP_CONNTRACK_ESTABLISHED)
1088 && new_state == TCP_CONNTRACK_ESTABLISHED) {
1089 /* Set ASSURED if we see a valid ack in ESTABLISHED
1090 after SYN_RECV or a valid answer for a picked up
1091 connection. */
1092 set_bit(IPS_ASSURED_BIT, &ct->status);
1093 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1094 }
1095 nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1096
1097 return NF_ACCEPT;
1098 }
1099
1100 /* Called when a new connection for this protocol is found. */
1101 static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
1102 unsigned int dataoff, unsigned int *timeouts)
1103 {
1104 enum tcp_conntrack new_state;
1105 const struct tcphdr *th;
1106 struct tcphdr _tcph;
1107 struct net *net = nf_ct_net(ct);
1108 struct nf_tcp_net *tn = tcp_pernet(net);
1109 const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
1110 const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
1111
1112 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
1113 BUG_ON(th == NULL);
1114
1115 /* Don't need lock here: this conntrack not in circulation yet */
1116 new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
1117
1118 /* Invalid: delete conntrack */
1119 if (new_state >= TCP_CONNTRACK_MAX) {
1120 pr_debug("nf_ct_tcp: invalid new packet, deleting conntrack\n");
1121 return false;
1122 }
1123
1124 if (new_state == TCP_CONNTRACK_SYN_SENT) {
1125 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
1126 /* SYN packet */
1127 ct->proto.tcp.seen[0].td_end =
1128 segment_seq_plus_len(ntohl(th->seq), skb->len,
1129 dataoff, th);
1130 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
1131 if (ct->proto.tcp.seen[0].td_maxwin == 0)
1132 ct->proto.tcp.seen[0].td_maxwin = 1;
1133 ct->proto.tcp.seen[0].td_maxend =
1134 ct->proto.tcp.seen[0].td_end;
1135
1136 tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
1137 } else if (tn->tcp_loose == 0) {
1138 /* Don't try to pick up connections. */
1139 return false;
1140 } else {
1141 memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
1142 /*
1143 * We are in the middle of a connection,
1144 * its history is lost for us.
1145 * Let's try to use the data from the packet.
1146 */
1147 ct->proto.tcp.seen[0].td_end =
1148 segment_seq_plus_len(ntohl(th->seq), skb->len,
1149 dataoff, th);
1150 ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
1151 if (ct->proto.tcp.seen[0].td_maxwin == 0)
1152 ct->proto.tcp.seen[0].td_maxwin = 1;
1153 ct->proto.tcp.seen[0].td_maxend =
1154 ct->proto.tcp.seen[0].td_end +
1155 ct->proto.tcp.seen[0].td_maxwin;
1156
1157 /* We assume SACK and use liberal window checking to handle
1158 * window scaling */
1159 ct->proto.tcp.seen[0].flags =
1160 ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
1161 IP_CT_TCP_FLAG_BE_LIBERAL;
1162 }
1163
1164 /* tcp_packet will set them */
1165 ct->proto.tcp.last_index = TCP_NONE_SET;
1166
1167 pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
1168 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
1169 sender->td_end, sender->td_maxend, sender->td_maxwin,
1170 sender->td_scale,
1171 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
1172 receiver->td_scale);
1173 return true;
1174 }
1175
1176 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1177
1178 #include <linux/netfilter/nfnetlink.h>
1179 #include <linux/netfilter/nfnetlink_conntrack.h>
1180
1181 static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1182 struct nf_conn *ct)
1183 {
1184 struct nlattr *nest_parms;
1185 struct nf_ct_tcp_flags tmp = {};
1186
1187 spin_lock_bh(&ct->lock);
1188 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED);
1189 if (!nest_parms)
1190 goto nla_put_failure;
1191
1192 if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
1193 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1194 ct->proto.tcp.seen[0].td_scale) ||
1195 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1196 ct->proto.tcp.seen[1].td_scale))
1197 goto nla_put_failure;
1198
1199 tmp.flags = ct->proto.tcp.seen[0].flags;
1200 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1201 sizeof(struct nf_ct_tcp_flags), &tmp))
1202 goto nla_put_failure;
1203
1204 tmp.flags = ct->proto.tcp.seen[1].flags;
1205 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1206 sizeof(struct nf_ct_tcp_flags), &tmp))
1207 goto nla_put_failure;
1208 spin_unlock_bh(&ct->lock);
1209
1210 nla_nest_end(skb, nest_parms);
1211
1212 return 0;
1213
1214 nla_put_failure:
1215 spin_unlock_bh(&ct->lock);
1216 return -1;
1217 }
1218
1219 static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1220 [CTA_PROTOINFO_TCP_STATE] = { .type = NLA_U8 },
1221 [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
1222 [CTA_PROTOINFO_TCP_WSCALE_REPLY] = { .type = NLA_U8 },
1223 [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL] = { .len = sizeof(struct nf_ct_tcp_flags) },
1224 [CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) },
1225 };
1226
1227 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1228 {
1229 struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1230 struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1231 int err;
1232
1233 /* updates might not contain anything about the private
1234 * protocol info; in that case skip the parsing */
1235 if (!pattr)
1236 return 0;
1237
1238 err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr, tcp_nla_policy);
1239 if (err < 0)
1240 return err;
1241
1242 if (tb[CTA_PROTOINFO_TCP_STATE] &&
1243 nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1244 return -EINVAL;
1245
1246 spin_lock_bh(&ct->lock);
1247 if (tb[CTA_PROTOINFO_TCP_STATE])
1248 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1249
1250 if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1251 struct nf_ct_tcp_flags *attr =
1252 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
1253 ct->proto.tcp.seen[0].flags &= ~attr->mask;
1254 ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
1255 }
1256
1257 if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
1258 struct nf_ct_tcp_flags *attr =
1259 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
1260 ct->proto.tcp.seen[1].flags &= ~attr->mask;
1261 ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
1262 }
1263
1264 if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
1265 tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
1266 ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
1267 ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1268 ct->proto.tcp.seen[0].td_scale =
1269 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
1270 ct->proto.tcp.seen[1].td_scale =
1271 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1272 }
1273 spin_unlock_bh(&ct->lock);
1274
1275 return 0;
1276 }
1277
1278 static int tcp_nlattr_size(void)
1279 {
1280 return nla_total_size(0) /* CTA_PROTOINFO_TCP */
1281 + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
1282 }
1283
1284 static int tcp_nlattr_tuple_size(void)
1285 {
1286 return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1287 }
1288 #endif
1289
1290 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1291
1292 #include <linux/netfilter/nfnetlink.h>
1293 #include <linux/netfilter/nfnetlink_cttimeout.h>
1294
1295 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1296 struct net *net, void *data)
1297 {
1298 unsigned int *timeouts = data;
1299 struct nf_tcp_net *tn = tcp_pernet(net);
1300 int i;
1301
1302 /* set default TCP timeouts. */
1303 for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1304 timeouts[i] = tn->timeouts[i];
1305
1306 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1307 timeouts[TCP_CONNTRACK_SYN_SENT] =
1308 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1309 }
1310 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1311 timeouts[TCP_CONNTRACK_SYN_RECV] =
1312 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
1313 }
1314 if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1315 timeouts[TCP_CONNTRACK_ESTABLISHED] =
1316 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
1317 }
1318 if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1319 timeouts[TCP_CONNTRACK_FIN_WAIT] =
1320 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
1321 }
1322 if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1323 timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1324 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
1325 }
1326 if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1327 timeouts[TCP_CONNTRACK_LAST_ACK] =
1328 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
1329 }
1330 if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1331 timeouts[TCP_CONNTRACK_TIME_WAIT] =
1332 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
1333 }
1334 if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1335 timeouts[TCP_CONNTRACK_CLOSE] =
1336 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
1337 }
1338 if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1339 timeouts[TCP_CONNTRACK_SYN_SENT2] =
1340 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
1341 }
1342 if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1343 timeouts[TCP_CONNTRACK_RETRANS] =
1344 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
1345 }
1346 if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1347 timeouts[TCP_CONNTRACK_UNACK] =
1348 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1349 }
1350 return 0;
1351 }
1352
1353 static int
1354 tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1355 {
1356 const unsigned int *timeouts = data;
1357
1358 if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1359 htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1360 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1361 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1362 nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1363 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1364 nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1365 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1366 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1367 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1368 nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1369 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1370 nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1371 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1372 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1373 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1374 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1375 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1376 nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1377 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1378 nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1379 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1380 goto nla_put_failure;
1381 return 0;
1382
1383 nla_put_failure:
1384 return -ENOSPC;
1385 }
1386
1387 static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1388 [CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
1389 [CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
1390 [CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
1391 [CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
1392 [CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
1393 [CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
1394 [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
1395 [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
1396 [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
1397 [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
1398 [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
1399 };
1400 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1401
1402 #ifdef CONFIG_SYSCTL
1403 static struct ctl_table tcp_sysctl_table[] = {
1404 {
1405 .procname = "nf_conntrack_tcp_timeout_syn_sent",
1406 .maxlen = sizeof(unsigned int),
1407 .mode = 0644,
1408 .proc_handler = proc_dointvec_jiffies,
1409 },
1410 {
1411 .procname = "nf_conntrack_tcp_timeout_syn_recv",
1412 .maxlen = sizeof(unsigned int),
1413 .mode = 0644,
1414 .proc_handler = proc_dointvec_jiffies,
1415 },
1416 {
1417 .procname = "nf_conntrack_tcp_timeout_established",
1418 .maxlen = sizeof(unsigned int),
1419 .mode = 0644,
1420 .proc_handler = proc_dointvec_jiffies,
1421 },
1422 {
1423 .procname = "nf_conntrack_tcp_timeout_fin_wait",
1424 .maxlen = sizeof(unsigned int),
1425 .mode = 0644,
1426 .proc_handler = proc_dointvec_jiffies,
1427 },
1428 {
1429 .procname = "nf_conntrack_tcp_timeout_close_wait",
1430 .maxlen = sizeof(unsigned int),
1431 .mode = 0644,
1432 .proc_handler = proc_dointvec_jiffies,
1433 },
1434 {
1435 .procname = "nf_conntrack_tcp_timeout_last_ack",
1436 .maxlen = sizeof(unsigned int),
1437 .mode = 0644,
1438 .proc_handler = proc_dointvec_jiffies,
1439 },
1440 {
1441 .procname = "nf_conntrack_tcp_timeout_time_wait",
1442 .maxlen = sizeof(unsigned int),
1443 .mode = 0644,
1444 .proc_handler = proc_dointvec_jiffies,
1445 },
1446 {
1447 .procname = "nf_conntrack_tcp_timeout_close",
1448 .maxlen = sizeof(unsigned int),
1449 .mode = 0644,
1450 .proc_handler = proc_dointvec_jiffies,
1451 },
1452 {
1453 .procname = "nf_conntrack_tcp_timeout_max_retrans",
1454 .maxlen = sizeof(unsigned int),
1455 .mode = 0644,
1456 .proc_handler = proc_dointvec_jiffies,
1457 },
1458 {
1459 .procname = "nf_conntrack_tcp_timeout_unacknowledged",
1460 .maxlen = sizeof(unsigned int),
1461 .mode = 0644,
1462 .proc_handler = proc_dointvec_jiffies,
1463 },
1464 {
1465 .procname = "nf_conntrack_tcp_loose",
1466 .maxlen = sizeof(unsigned int),
1467 .mode = 0644,
1468 .proc_handler = proc_dointvec,
1469 },
1470 {
1471 .procname = "nf_conntrack_tcp_be_liberal",
1472 .maxlen = sizeof(unsigned int),
1473 .mode = 0644,
1474 .proc_handler = proc_dointvec,
1475 },
1476 {
1477 .procname = "nf_conntrack_tcp_max_retrans",
1478 .maxlen = sizeof(unsigned int),
1479 .mode = 0644,
1480 .proc_handler = proc_dointvec,
1481 },
1482 { }
1483 };
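
/* Usage note (illustrative): when CONFIG_SYSCTL is enabled the entries above
 * are wired to the per-netns values in tcp_kmemdup_sysctl_table() below and
 * show up under /proc/sys/net/netfilter/, e.g.
 *
 *	sysctl net.netfilter.nf_conntrack_tcp_timeout_established
 *	sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1
 */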
1484
1485 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
1486 static struct ctl_table tcp_compat_sysctl_table[] = {
1487 {
1488 .procname = "ip_conntrack_tcp_timeout_syn_sent",
1489 .maxlen = sizeof(unsigned int),
1490 .mode = 0644,
1491 .proc_handler = proc_dointvec_jiffies,
1492 },
1493 {
1494 .procname = "ip_conntrack_tcp_timeout_syn_sent2",
1495 .maxlen = sizeof(unsigned int),
1496 .mode = 0644,
1497 .proc_handler = proc_dointvec_jiffies,
1498 },
1499 {
1500 .procname = "ip_conntrack_tcp_timeout_syn_recv",
1501 .maxlen = sizeof(unsigned int),
1502 .mode = 0644,
1503 .proc_handler = proc_dointvec_jiffies,
1504 },
1505 {
1506 .procname = "ip_conntrack_tcp_timeout_established",
1507 .maxlen = sizeof(unsigned int),
1508 .mode = 0644,
1509 .proc_handler = proc_dointvec_jiffies,
1510 },
1511 {
1512 .procname = "ip_conntrack_tcp_timeout_fin_wait",
1513 .maxlen = sizeof(unsigned int),
1514 .mode = 0644,
1515 .proc_handler = proc_dointvec_jiffies,
1516 },
1517 {
1518 .procname = "ip_conntrack_tcp_timeout_close_wait",
1519 .maxlen = sizeof(unsigned int),
1520 .mode = 0644,
1521 .proc_handler = proc_dointvec_jiffies,
1522 },
1523 {
1524 .procname = "ip_conntrack_tcp_timeout_last_ack",
1525 .maxlen = sizeof(unsigned int),
1526 .mode = 0644,
1527 .proc_handler = proc_dointvec_jiffies,
1528 },
1529 {
1530 .procname = "ip_conntrack_tcp_timeout_time_wait",
1531 .maxlen = sizeof(unsigned int),
1532 .mode = 0644,
1533 .proc_handler = proc_dointvec_jiffies,
1534 },
1535 {
1536 .procname = "ip_conntrack_tcp_timeout_close",
1537 .maxlen = sizeof(unsigned int),
1538 .mode = 0644,
1539 .proc_handler = proc_dointvec_jiffies,
1540 },
1541 {
1542 .procname = "ip_conntrack_tcp_timeout_max_retrans",
1543 .maxlen = sizeof(unsigned int),
1544 .mode = 0644,
1545 .proc_handler = proc_dointvec_jiffies,
1546 },
1547 {
1548 .procname = "ip_conntrack_tcp_loose",
1549 .maxlen = sizeof(unsigned int),
1550 .mode = 0644,
1551 .proc_handler = proc_dointvec,
1552 },
1553 {
1554 .procname = "ip_conntrack_tcp_be_liberal",
1555 .maxlen = sizeof(unsigned int),
1556 .mode = 0644,
1557 .proc_handler = proc_dointvec,
1558 },
1559 {
1560 .procname = "ip_conntrack_tcp_max_retrans",
1561 .maxlen = sizeof(unsigned int),
1562 .mode = 0644,
1563 .proc_handler = proc_dointvec,
1564 },
1565 { }
1566 };
1567 #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
1568 #endif /* CONFIG_SYSCTL */
1569
1570 static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
1571 struct nf_tcp_net *tn)
1572 {
1573 #ifdef CONFIG_SYSCTL
1574 if (pn->ctl_table)
1575 return 0;
1576
1577 pn->ctl_table = kmemdup(tcp_sysctl_table,
1578 sizeof(tcp_sysctl_table),
1579 GFP_KERNEL);
1580 if (!pn->ctl_table)
1581 return -ENOMEM;
1582
1583 pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
1584 pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
1585 pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
1586 pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
1587 pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
1588 pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
1589 pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
1590 pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
1591 pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
1592 pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
1593 pn->ctl_table[10].data = &tn->tcp_loose;
1594 pn->ctl_table[11].data = &tn->tcp_be_liberal;
1595 pn->ctl_table[12].data = &tn->tcp_max_retrans;
1596 #endif
1597 return 0;
1598 }
1599
1600 static int tcp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
1601 struct nf_tcp_net *tn)
1602 {
1603 #ifdef CONFIG_SYSCTL
1604 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
1605 pn->ctl_compat_table = kmemdup(tcp_compat_sysctl_table,
1606 sizeof(tcp_compat_sysctl_table),
1607 GFP_KERNEL);
1608 if (!pn->ctl_compat_table)
1609 return -ENOMEM;
1610
1611 pn->ctl_compat_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
1612 pn->ctl_compat_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT2];
1613 pn->ctl_compat_table[2].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
1614 pn->ctl_compat_table[3].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
1615 pn->ctl_compat_table[4].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
1616 pn->ctl_compat_table[5].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
1617 pn->ctl_compat_table[6].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
1618 pn->ctl_compat_table[7].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
1619 pn->ctl_compat_table[8].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
1620 pn->ctl_compat_table[9].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
1621 pn->ctl_compat_table[10].data = &tn->tcp_loose;
1622 pn->ctl_compat_table[11].data = &tn->tcp_be_liberal;
1623 pn->ctl_compat_table[12].data = &tn->tcp_max_retrans;
1624 #endif
1625 #endif
1626 return 0;
1627 }
1628
1629 static int tcp_init_net(struct net *net, u_int16_t proto)
1630 {
1631 int ret;
1632 struct nf_tcp_net *tn = tcp_pernet(net);
1633 struct nf_proto_net *pn = &tn->pn;
1634
1635 if (!pn->users) {
1636 int i;
1637
1638 for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1639 tn->timeouts[i] = tcp_timeouts[i];
1640
1641 tn->tcp_loose = nf_ct_tcp_loose;
1642 tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
1643 tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
1644 }
1645
1646 if (proto == AF_INET) {
1647 ret = tcp_kmemdup_compat_sysctl_table(pn, tn);
1648 if (ret < 0)
1649 return ret;
1650
1651 ret = tcp_kmemdup_sysctl_table(pn, tn);
1652 if (ret < 0)
1653 nf_ct_kfree_compat_sysctl_table(pn);
1654 } else
1655 ret = tcp_kmemdup_sysctl_table(pn, tn);
1656
1657 return ret;
1658 }
1659
1660 static struct nf_proto_net *tcp_get_net_proto(struct net *net)
1661 {
1662 return &net->ct.nf_ct_proto.tcp.pn;
1663 }
1664
1665 struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
1666 {
1667 .l3proto = PF_INET,
1668 .l4proto = IPPROTO_TCP,
1669 .name = "tcp",
1670 .pkt_to_tuple = tcp_pkt_to_tuple,
1671 .invert_tuple = tcp_invert_tuple,
1672 .print_tuple = tcp_print_tuple,
1673 .print_conntrack = tcp_print_conntrack,
1674 .packet = tcp_packet,
1675 .get_timeouts = tcp_get_timeouts,
1676 .new = tcp_new,
1677 .error = tcp_error,
1678 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1679 .to_nlattr = tcp_to_nlattr,
1680 .nlattr_size = tcp_nlattr_size,
1681 .from_nlattr = nlattr_to_tcp,
1682 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
1683 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
1684 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1685 .nla_policy = nf_ct_port_nla_policy,
1686 #endif
1687 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1688 .ctnl_timeout = {
1689 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1690 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
1691 .nlattr_max = CTA_TIMEOUT_TCP_MAX,
1692 .obj_size = sizeof(unsigned int) *
1693 TCP_CONNTRACK_TIMEOUT_MAX,
1694 .nla_policy = tcp_timeout_nla_policy,
1695 },
1696 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1697 .init_net = tcp_init_net,
1698 .get_net_proto = tcp_get_net_proto,
1699 };
1700 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
1701
1702 struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
1703 {
1704 .l3proto = PF_INET6,
1705 .l4proto = IPPROTO_TCP,
1706 .name = "tcp",
1707 .pkt_to_tuple = tcp_pkt_to_tuple,
1708 .invert_tuple = tcp_invert_tuple,
1709 .print_tuple = tcp_print_tuple,
1710 .print_conntrack = tcp_print_conntrack,
1711 .packet = tcp_packet,
1712 .get_timeouts = tcp_get_timeouts,
1713 .new = tcp_new,
1714 .error = tcp_error,
1715 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1716 .to_nlattr = tcp_to_nlattr,
1717 .nlattr_size = tcp_nlattr_size,
1718 .from_nlattr = nlattr_to_tcp,
1719 .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
1720 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
1721 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1722 .nla_policy = nf_ct_port_nla_policy,
1723 #endif
1724 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
1725 .ctnl_timeout = {
1726 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1727 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
1728 .nlattr_max = CTA_TIMEOUT_TCP_MAX,
1729 .obj_size = sizeof(unsigned int) *
1730 TCP_CONNTRACK_TIMEOUT_MAX,
1731 .nla_policy = tcp_timeout_nla_policy,
1732 },
1733 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
1734 .init_net = tcp_init_net,
1735 .get_net_proto = tcp_get_net_proto,
1736 };
1737 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);