46b9baa845a66e5de4da09a0a36f37428e1c1786
[deliverable/linux.git] / net / netfilter / nf_nat_helper.c
1 /* nf_nat_helper.c - generic support functions for NAT helpers
2 *
3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2007-2012 Patrick McHardy <kaber@trash.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #include <linux/module.h>
12 #include <linux/gfp.h>
13 #include <linux/types.h>
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <net/tcp.h>
18
19 #include <net/netfilter/nf_conntrack.h>
20 #include <net/netfilter/nf_conntrack_helper.h>
21 #include <net/netfilter/nf_conntrack_ecache.h>
22 #include <net/netfilter/nf_conntrack_expect.h>
23 #include <net/netfilter/nf_nat.h>
24 #include <net/netfilter/nf_nat_l3proto.h>
25 #include <net/netfilter/nf_nat_l4proto.h>
26 #include <net/netfilter/nf_nat_core.h>
27 #include <net/netfilter/nf_nat_helper.h>
28
/* Debug helper: dump a struct nf_nat_seq's offsets and correction point.
 * Wrapped in do { } while (0) so it expands as exactly one statement
 * (safe in an unbraced if/else); the trailing semicolon is supplied by
 * the caller, not the macro, and the argument is parenthesized.
 */
#define DUMP_OFFSET(x) \
	do { \
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after, \
			 (x)->correction_pos); \
	} while (0)
32
33 /* Setup TCP sequence correction given this change at this sequence */
34 static inline void
35 adjust_tcp_sequence(u32 seq,
36 int sizediff,
37 struct nf_conn *ct,
38 enum ip_conntrack_info ctinfo)
39 {
40 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
41 struct nf_conn_nat *nat = nfct_nat(ct);
42 struct nf_nat_seq *this_way = &nat->seq[dir];
43
44 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
45 seq, sizediff);
46
47 pr_debug("adjust_tcp_sequence: Seq_offset before: ");
48 DUMP_OFFSET(this_way);
49
50 spin_lock_bh(&ct->lock);
51
52 /* SYN adjust. If it's uninitialized, or this is after last
53 * correction, record it: we don't handle more than one
54 * adjustment in the window, but do deal with common case of a
55 * retransmit */
56 if (this_way->offset_before == this_way->offset_after ||
57 before(this_way->correction_pos, seq)) {
58 this_way->correction_pos = seq;
59 this_way->offset_before = this_way->offset_after;
60 this_way->offset_after += sizediff;
61 }
62 spin_unlock_bh(&ct->lock);
63
64 pr_debug("adjust_tcp_sequence: Seq_offset after: ");
65 DUMP_OFFSET(this_way);
66 }
67
68 /* Get the offset value, for conntrack. Caller must have the conntrack locked */
69 s32 nf_nat_get_offset(const struct nf_conn *ct,
70 enum ip_conntrack_dir dir,
71 u32 seq)
72 {
73 struct nf_conn_nat *nat = nfct_nat(ct);
74 struct nf_nat_seq *this_way;
75
76 if (!nat)
77 return 0;
78
79 this_way = &nat->seq[dir];
80 return after(seq, this_way->correction_pos)
81 ? this_way->offset_after : this_way->offset_before;
82 }
83
84 /* Frobs data inside this packet, which is linear. */
85 static void mangle_contents(struct sk_buff *skb,
86 unsigned int dataoff,
87 unsigned int match_offset,
88 unsigned int match_len,
89 const char *rep_buffer,
90 unsigned int rep_len)
91 {
92 unsigned char *data;
93
94 BUG_ON(skb_is_nonlinear(skb));
95 data = skb_network_header(skb) + dataoff;
96
97 /* move post-replacement */
98 memmove(data + match_offset + rep_len,
99 data + match_offset + match_len,
100 skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff +
101 match_offset + match_len));
102
103 /* insert data from buffer */
104 memcpy(data + match_offset, rep_buffer, rep_len);
105
106 /* update skb info */
107 if (rep_len > match_len) {
108 pr_debug("nf_nat_mangle_packet: Extending packet by "
109 "%u from %u bytes\n", rep_len - match_len, skb->len);
110 skb_put(skb, rep_len - match_len);
111 } else {
112 pr_debug("nf_nat_mangle_packet: Shrinking packet from "
113 "%u from %u bytes\n", match_len - rep_len, skb->len);
114 __skb_trim(skb, skb->len + rep_len - match_len);
115 }
116
117 if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) {
118 /* fix IP hdr checksum information */
119 ip_hdr(skb)->tot_len = htons(skb->len);
120 ip_send_check(ip_hdr(skb));
121 } else
122 ipv6_hdr(skb)->payload_len =
123 htons(skb->len - sizeof(struct ipv6hdr));
124 }
125
126 /* Unusual, but possible case. */
127 static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
128 {
129 if (skb->len + extra > 65535)
130 return 0;
131
132 if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
133 return 0;
134
135 return 1;
136 }
137
138 void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
139 __be32 seq, s32 off)
140 {
141 if (!off)
142 return;
143 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
144 adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
145 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
146 }
147 EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
148
149 void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
150 u32 ctinfo, int off)
151 {
152 const struct tcphdr *th;
153
154 if (nf_ct_protonum(ct) != IPPROTO_TCP)
155 return;
156
157 th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb));
158 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
159 }
160 EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
161
162 /* Generic function for mangling variable-length address changes inside
163 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
164 * command in FTP).
165 *
166 * Takes care about all the nasty sequence number changes, checksumming,
167 * skb enlargement, ...
168 *
169 * */
170 int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
171 struct nf_conn *ct,
172 enum ip_conntrack_info ctinfo,
173 unsigned int protoff,
174 unsigned int match_offset,
175 unsigned int match_len,
176 const char *rep_buffer,
177 unsigned int rep_len, bool adjust)
178 {
179 const struct nf_nat_l3proto *l3proto;
180 struct tcphdr *tcph;
181 int oldlen, datalen;
182
183 if (!skb_make_writable(skb, skb->len))
184 return 0;
185
186 if (rep_len > match_len &&
187 rep_len - match_len > skb_tailroom(skb) &&
188 !enlarge_skb(skb, rep_len - match_len))
189 return 0;
190
191 SKB_LINEAR_ASSERT(skb);
192
193 tcph = (void *)skb->data + protoff;
194
195 oldlen = skb->len - protoff;
196 mangle_contents(skb, protoff + tcph->doff*4,
197 match_offset, match_len, rep_buffer, rep_len);
198
199 datalen = skb->len - protoff;
200
201 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
202 l3proto->csum_recalc(skb, IPPROTO_TCP, tcph, &tcph->check,
203 datalen, oldlen);
204
205 if (adjust && rep_len != match_len)
206 nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
207 (int)rep_len - (int)match_len);
208
209 return 1;
210 }
211 EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
212
213 /* Generic function for mangling variable-length address changes inside
214 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
215 * command in the Amanda protocol)
216 *
217 * Takes care about all the nasty sequence number changes, checksumming,
218 * skb enlargement, ...
219 *
220 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
221 * should be fairly easy to do.
222 */
223 int
224 nf_nat_mangle_udp_packet(struct sk_buff *skb,
225 struct nf_conn *ct,
226 enum ip_conntrack_info ctinfo,
227 unsigned int protoff,
228 unsigned int match_offset,
229 unsigned int match_len,
230 const char *rep_buffer,
231 unsigned int rep_len)
232 {
233 const struct nf_nat_l3proto *l3proto;
234 struct udphdr *udph;
235 int datalen, oldlen;
236
237 if (!skb_make_writable(skb, skb->len))
238 return 0;
239
240 if (rep_len > match_len &&
241 rep_len - match_len > skb_tailroom(skb) &&
242 !enlarge_skb(skb, rep_len - match_len))
243 return 0;
244
245 udph = (void *)skb->data + protoff;
246
247 oldlen = skb->len - protoff;
248 mangle_contents(skb, protoff + sizeof(*udph),
249 match_offset, match_len, rep_buffer, rep_len);
250
251 /* update the length of the UDP packet */
252 datalen = skb->len - protoff;
253 udph->len = htons(datalen);
254
255 /* fix udp checksum if udp checksum was previously calculated */
256 if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
257 return 1;
258
259 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
260 l3proto->csum_recalc(skb, IPPROTO_UDP, udph, &udph->check,
261 datalen, oldlen);
262
263 return 1;
264 }
265 EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
266
267 /* Adjust one found SACK option including checksum correction */
268 static void
269 sack_adjust(struct sk_buff *skb,
270 struct tcphdr *tcph,
271 unsigned int sackoff,
272 unsigned int sackend,
273 struct nf_nat_seq *natseq)
274 {
275 while (sackoff < sackend) {
276 struct tcp_sack_block_wire *sack;
277 __be32 new_start_seq, new_end_seq;
278
279 sack = (void *)skb->data + sackoff;
280 if (after(ntohl(sack->start_seq) - natseq->offset_before,
281 natseq->correction_pos))
282 new_start_seq = htonl(ntohl(sack->start_seq)
283 - natseq->offset_after);
284 else
285 new_start_seq = htonl(ntohl(sack->start_seq)
286 - natseq->offset_before);
287
288 if (after(ntohl(sack->end_seq) - natseq->offset_before,
289 natseq->correction_pos))
290 new_end_seq = htonl(ntohl(sack->end_seq)
291 - natseq->offset_after);
292 else
293 new_end_seq = htonl(ntohl(sack->end_seq)
294 - natseq->offset_before);
295
296 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
297 ntohl(sack->start_seq), new_start_seq,
298 ntohl(sack->end_seq), new_end_seq);
299
300 inet_proto_csum_replace4(&tcph->check, skb,
301 sack->start_seq, new_start_seq, 0);
302 inet_proto_csum_replace4(&tcph->check, skb,
303 sack->end_seq, new_end_seq, 0);
304 sack->start_seq = new_start_seq;
305 sack->end_seq = new_end_seq;
306 sackoff += sizeof(*sack);
307 }
308 }
309
/* TCP SACK sequence number adjustment.
 *
 * Walks the TCP option area looking for SACK options and hands each
 * well-formed one to sack_adjust().  Returns 1 on success (including
 * "no SACK present"), 0 if the skb could not be made writable or the
 * option area is malformed.
 */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   unsigned int protoff,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	/* Options live between the fixed TCP header and doff * 4. */
	optoff = protoff + sizeof(struct tcphdr);
	optend = protoff + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			/* End-of-option-list: nothing further to scan. */
			return 1;
		case TCPOPT_NOP:
			/* One-byte padding, no length field. */
			optoff++;
			continue;
		default:
			/* no partial options: the length byte must exist,
			 * fit inside the option area, and be at least the
			 * kind+length bytes themselves. */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			/* SACK blocks hold the peer's sequence numbers,
			 * hence the opposite direction's seq state. */
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
355
/* TCP sequence number adjustment. Returns 1 on success, 0 on failure.
 *
 * Applies the recorded per-direction offsets to this packet's seq and
 * ack numbers (and its SACK blocks), fixing the TCP checksum
 * incrementally as it goes.
 */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo,
		  unsigned int protoff)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s32 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;
	int res;

	dir = CTINFO2DIR(ctinfo);

	/* seq is corrected with this direction's offsets; ack echoes the
	 * peer's sequence numbers, so it uses the reply direction's. */
	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + protoff;
	/* ct->lock guards the nf_nat_seq state against concurrent
	 * updates from adjust_tcp_sequence(); the SACK walk below runs
	 * under the same critical section. */
	spin_lock_bh(&ct->lock);
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	/* The ack already carries the peer's old offset; undo it before
	 * comparing against the peer's correction point. */
	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	/* Incrementally fix the checksum while the old header values
	 * are still in place, then store the new ones. */
	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	res = nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
	spin_unlock_bh(&ct->lock);

	return res;
}
410
411 /* Setup NAT on this expected conntrack so it follows master. */
412 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
413 void nf_nat_follow_master(struct nf_conn *ct,
414 struct nf_conntrack_expect *exp)
415 {
416 struct nf_nat_range range;
417
418 /* This must be a fresh one. */
419 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
420
421 /* Change src to where master sends to */
422 range.flags = NF_NAT_RANGE_MAP_IPS;
423 range.min_addr = range.max_addr
424 = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
425 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
426
427 /* For DST manip, map port here to where it's expected. */
428 range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
429 range.min_proto = range.max_proto = exp->saved_proto;
430 range.min_addr = range.max_addr
431 = ct->master->tuplehash[!exp->dir].tuple.src.u3;
432 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
433 }
434 EXPORT_SYMBOL(nf_nat_follow_master);
This page took 0.039342 seconds and 5 git commands to generate.