/* ip_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 #include <linux/module.h>
11 #include <linux/kmod.h>
12 #include <linux/types.h>
13 #include <linux/timer.h>
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <net/checksum.h>
20 #include <linux/netfilter_ipv4.h>
21 #include <net/netfilter/nf_conntrack.h>
22 #include <net/netfilter/nf_conntrack_helper.h>
23 #include <net/netfilter/nf_conntrack_expect.h>
24 #include <net/netfilter/nf_nat.h>
25 #include <net/netfilter/nf_nat_protocol.h>
26 #include <net/netfilter/nf_nat_core.h>
27 #include <net/netfilter/nf_nat_helper.h>
/* Dump one struct nf_nat_seq (offsets and correction position) for
 * debugging.  Wrapped in do { } while (0) so the macro behaves as a
 * single statement and is safe in unbraced if/else bodies; the argument
 * is parenthesized against operator-precedence surprises.
 */
#define DUMP_OFFSET(x)							\
	do {								\
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after,		\
			 (x)->correction_pos);				\
	} while (0)
/* Serializes updates to the per-connection sequence-number offsets
 * (struct nf_nat_seq) recorded by adjust_tcp_sequence(). */
static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
35 /* Setup TCP sequence correction given this change at this sequence */
37 adjust_tcp_sequence(u32 seq
,
40 enum ip_conntrack_info ctinfo
)
43 struct nf_nat_seq
*this_way
, *other_way
;
44 struct nf_conn_nat
*nat
= nfct_nat(ct
);
46 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
49 dir
= CTINFO2DIR(ctinfo
);
51 this_way
= &nat
->seq
[dir
];
52 other_way
= &nat
->seq
[!dir
];
54 pr_debug("nf_nat_resize_packet: Seq_offset before: ");
55 DUMP_OFFSET(this_way
);
57 spin_lock_bh(&nf_nat_seqofs_lock
);
59 /* SYN adjust. If it's uninitialized, or this is after last
60 * correction, record it: we don't handle more than one
61 * adjustment in the window, but do deal with common case of a
63 if (this_way
->offset_before
== this_way
->offset_after
||
64 before(this_way
->correction_pos
, seq
)) {
65 this_way
->correction_pos
= seq
;
66 this_way
->offset_before
= this_way
->offset_after
;
67 this_way
->offset_after
+= sizediff
;
69 spin_unlock_bh(&nf_nat_seqofs_lock
);
71 pr_debug("nf_nat_resize_packet: Seq_offset after: ");
72 DUMP_OFFSET(this_way
);
75 /* Frobs data inside this packet, which is linear. */
76 static void mangle_contents(struct sk_buff
*skb
,
78 unsigned int match_offset
,
79 unsigned int match_len
,
80 const char *rep_buffer
,
85 BUG_ON(skb_is_nonlinear(skb
));
86 data
= skb_network_header(skb
) + dataoff
;
88 /* move post-replacement */
89 memmove(data
+ match_offset
+ rep_len
,
90 data
+ match_offset
+ match_len
,
91 skb
->tail
- (skb
->network_header
+ dataoff
+
92 match_offset
+ match_len
));
94 /* insert data from buffer */
95 memcpy(data
+ match_offset
, rep_buffer
, rep_len
);
98 if (rep_len
> match_len
) {
99 pr_debug("nf_nat_mangle_packet: Extending packet by "
100 "%u from %u bytes\n", rep_len
- match_len
, skb
->len
);
101 skb_put(skb
, rep_len
- match_len
);
103 pr_debug("nf_nat_mangle_packet: Shrinking packet from "
104 "%u from %u bytes\n", match_len
- rep_len
, skb
->len
);
105 __skb_trim(skb
, skb
->len
+ rep_len
- match_len
);
108 /* fix IP hdr checksum information */
109 ip_hdr(skb
)->tot_len
= htons(skb
->len
);
110 ip_send_check(ip_hdr(skb
));
113 /* Unusual, but possible case. */
114 static int enlarge_skb(struct sk_buff
*skb
, unsigned int extra
)
116 if (skb
->len
+ extra
> 65535)
119 if (pskb_expand_head(skb
, 0, extra
- skb_tailroom(skb
), GFP_ATOMIC
))
125 /* Generic function for mangling variable-length address changes inside
126 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
129 * Takes care about all the nasty sequence number changes, checksumming,
130 * skb enlargement, ...
134 nf_nat_mangle_tcp_packet(struct sk_buff
*skb
,
136 enum ip_conntrack_info ctinfo
,
137 unsigned int match_offset
,
138 unsigned int match_len
,
139 const char *rep_buffer
,
140 unsigned int rep_len
)
142 struct rtable
*rt
= (struct rtable
*)skb
->dst
;
147 if (!skb_make_writable(skb
, skb
->len
))
150 if (rep_len
> match_len
&&
151 rep_len
- match_len
> skb_tailroom(skb
) &&
152 !enlarge_skb(skb
, rep_len
- match_len
))
155 SKB_LINEAR_ASSERT(skb
);
158 tcph
= (void *)iph
+ iph
->ihl
*4;
160 oldlen
= skb
->len
- iph
->ihl
*4;
161 mangle_contents(skb
, iph
->ihl
*4 + tcph
->doff
*4,
162 match_offset
, match_len
, rep_buffer
, rep_len
);
164 datalen
= skb
->len
- iph
->ihl
*4;
165 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
) {
166 if (!(rt
->rt_flags
& RTCF_LOCAL
) &&
167 skb
->dev
->features
& NETIF_F_V4_CSUM
) {
168 skb
->ip_summed
= CHECKSUM_PARTIAL
;
169 skb
->csum_start
= skb_headroom(skb
) +
170 skb_network_offset(skb
) +
172 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
173 tcph
->check
= ~tcp_v4_check(datalen
,
174 iph
->saddr
, iph
->daddr
, 0);
177 tcph
->check
= tcp_v4_check(datalen
,
178 iph
->saddr
, iph
->daddr
,
183 nf_proto_csum_replace2(&tcph
->check
, skb
,
184 htons(oldlen
), htons(datalen
), 1);
186 if (rep_len
!= match_len
) {
187 set_bit(IPS_SEQ_ADJUST_BIT
, &ct
->status
);
188 adjust_tcp_sequence(ntohl(tcph
->seq
),
189 (int)rep_len
- (int)match_len
,
191 /* Tell TCP window tracking about seq change */
192 nf_conntrack_tcp_update(skb
, ip_hdrlen(skb
),
193 ct
, CTINFO2DIR(ctinfo
));
197 EXPORT_SYMBOL(nf_nat_mangle_tcp_packet
);
199 /* Generic function for mangling variable-length address changes inside
200 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
201 * command in the Amanda protocol)
203 * Takes care about all the nasty sequence number changes, checksumming,
204 * skb enlargement, ...
206 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
207 * should be fairly easy to do.
210 nf_nat_mangle_udp_packet(struct sk_buff
*skb
,
212 enum ip_conntrack_info ctinfo
,
213 unsigned int match_offset
,
214 unsigned int match_len
,
215 const char *rep_buffer
,
216 unsigned int rep_len
)
218 struct rtable
*rt
= (struct rtable
*)skb
->dst
;
223 /* UDP helpers might accidentally mangle the wrong packet */
225 if (skb
->len
< iph
->ihl
*4 + sizeof(*udph
) +
226 match_offset
+ match_len
)
229 if (!skb_make_writable(skb
, skb
->len
))
232 if (rep_len
> match_len
&&
233 rep_len
- match_len
> skb_tailroom(skb
) &&
234 !enlarge_skb(skb
, rep_len
- match_len
))
238 udph
= (void *)iph
+ iph
->ihl
*4;
240 oldlen
= skb
->len
- iph
->ihl
*4;
241 mangle_contents(skb
, iph
->ihl
*4 + sizeof(*udph
),
242 match_offset
, match_len
, rep_buffer
, rep_len
);
244 /* update the length of the UDP packet */
245 datalen
= skb
->len
- iph
->ihl
*4;
246 udph
->len
= htons(datalen
);
248 /* fix udp checksum if udp checksum was previously calculated */
249 if (!udph
->check
&& skb
->ip_summed
!= CHECKSUM_PARTIAL
)
252 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
) {
253 if (!(rt
->rt_flags
& RTCF_LOCAL
) &&
254 skb
->dev
->features
& NETIF_F_V4_CSUM
) {
255 skb
->ip_summed
= CHECKSUM_PARTIAL
;
256 skb
->csum_start
= skb_headroom(skb
) +
257 skb_network_offset(skb
) +
259 skb
->csum_offset
= offsetof(struct udphdr
, check
);
260 udph
->check
= ~csum_tcpudp_magic(iph
->saddr
, iph
->daddr
,
261 datalen
, IPPROTO_UDP
,
265 udph
->check
= csum_tcpudp_magic(iph
->saddr
, iph
->daddr
,
266 datalen
, IPPROTO_UDP
,
270 udph
->check
= CSUM_MANGLED_0
;
273 nf_proto_csum_replace2(&udph
->check
, skb
,
274 htons(oldlen
), htons(datalen
), 1);
278 EXPORT_SYMBOL(nf_nat_mangle_udp_packet
);
280 /* Adjust one found SACK option including checksum correction */
282 sack_adjust(struct sk_buff
*skb
,
284 unsigned int sackoff
,
285 unsigned int sackend
,
286 struct nf_nat_seq
*natseq
)
288 while (sackoff
< sackend
) {
289 struct tcp_sack_block_wire
*sack
;
290 __be32 new_start_seq
, new_end_seq
;
292 sack
= (void *)skb
->data
+ sackoff
;
293 if (after(ntohl(sack
->start_seq
) - natseq
->offset_before
,
294 natseq
->correction_pos
))
295 new_start_seq
= htonl(ntohl(sack
->start_seq
)
296 - natseq
->offset_after
);
298 new_start_seq
= htonl(ntohl(sack
->start_seq
)
299 - natseq
->offset_before
);
301 if (after(ntohl(sack
->end_seq
) - natseq
->offset_before
,
302 natseq
->correction_pos
))
303 new_end_seq
= htonl(ntohl(sack
->end_seq
)
304 - natseq
->offset_after
);
306 new_end_seq
= htonl(ntohl(sack
->end_seq
)
307 - natseq
->offset_before
);
309 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
310 ntohl(sack
->start_seq
), new_start_seq
,
311 ntohl(sack
->end_seq
), new_end_seq
);
313 nf_proto_csum_replace4(&tcph
->check
, skb
,
314 sack
->start_seq
, new_start_seq
, 0);
315 nf_proto_csum_replace4(&tcph
->check
, skb
,
316 sack
->end_seq
, new_end_seq
, 0);
317 sack
->start_seq
= new_start_seq
;
318 sack
->end_seq
= new_end_seq
;
319 sackoff
+= sizeof(*sack
);
323 /* TCP SACK sequence number adjustment */
324 static inline unsigned int
325 nf_nat_sack_adjust(struct sk_buff
*skb
,
328 enum ip_conntrack_info ctinfo
)
330 unsigned int dir
, optoff
, optend
;
331 struct nf_conn_nat
*nat
= nfct_nat(ct
);
333 optoff
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
334 optend
= ip_hdrlen(skb
) + tcph
->doff
* 4;
336 if (!skb_make_writable(skb
, optend
))
339 dir
= CTINFO2DIR(ctinfo
);
341 while (optoff
< optend
) {
342 /* Usually: option, length. */
343 unsigned char *op
= skb
->data
+ optoff
;
352 /* no partial options */
353 if (optoff
+ 1 == optend
||
354 optoff
+ op
[1] > optend
||
357 if (op
[0] == TCPOPT_SACK
&&
358 op
[1] >= 2+TCPOLEN_SACK_PERBLOCK
&&
359 ((op
[1] - 2) % TCPOLEN_SACK_PERBLOCK
) == 0)
360 sack_adjust(skb
, tcph
, optoff
+2,
361 optoff
+op
[1], &nat
->seq
[!dir
]);
368 /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
370 nf_nat_seq_adjust(struct sk_buff
*skb
,
372 enum ip_conntrack_info ctinfo
)
376 __be32 newseq
, newack
;
377 struct nf_conn_nat
*nat
= nfct_nat(ct
);
378 struct nf_nat_seq
*this_way
, *other_way
;
380 dir
= CTINFO2DIR(ctinfo
);
382 this_way
= &nat
->seq
[dir
];
383 other_way
= &nat
->seq
[!dir
];
385 if (!skb_make_writable(skb
, ip_hdrlen(skb
) + sizeof(*tcph
)))
388 tcph
= (void *)skb
->data
+ ip_hdrlen(skb
);
389 if (after(ntohl(tcph
->seq
), this_way
->correction_pos
))
390 newseq
= htonl(ntohl(tcph
->seq
) + this_way
->offset_after
);
392 newseq
= htonl(ntohl(tcph
->seq
) + this_way
->offset_before
);
394 if (after(ntohl(tcph
->ack_seq
) - other_way
->offset_before
,
395 other_way
->correction_pos
))
396 newack
= htonl(ntohl(tcph
->ack_seq
) - other_way
->offset_after
);
398 newack
= htonl(ntohl(tcph
->ack_seq
) - other_way
->offset_before
);
400 nf_proto_csum_replace4(&tcph
->check
, skb
, tcph
->seq
, newseq
, 0);
401 nf_proto_csum_replace4(&tcph
->check
, skb
, tcph
->ack_seq
, newack
, 0);
403 pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
404 ntohl(tcph
->seq
), ntohl(newseq
), ntohl(tcph
->ack_seq
),
408 tcph
->ack_seq
= newack
;
410 if (!nf_nat_sack_adjust(skb
, tcph
, ct
, ctinfo
))
413 nf_conntrack_tcp_update(skb
, ip_hdrlen(skb
), ct
, dir
);
417 EXPORT_SYMBOL(nf_nat_seq_adjust
);
419 /* Setup NAT on this expected conntrack so it follows master. */
420 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
421 void nf_nat_follow_master(struct nf_conn
*ct
,
422 struct nf_conntrack_expect
*exp
)
424 struct nf_nat_range range
;
426 /* This must be a fresh one. */
427 BUG_ON(ct
->status
& IPS_NAT_DONE_MASK
);
429 /* Change src to where master sends to */
430 range
.flags
= IP_NAT_RANGE_MAP_IPS
;
431 range
.min_ip
= range
.max_ip
432 = ct
->master
->tuplehash
[!exp
->dir
].tuple
.dst
.u3
.ip
;
433 /* hook doesn't matter, but it has to do source manip */
434 nf_nat_setup_info(ct
, &range
, NF_IP_POST_ROUTING
);
436 /* For DST manip, map port here to where it's expected. */
437 range
.flags
= (IP_NAT_RANGE_MAP_IPS
| IP_NAT_RANGE_PROTO_SPECIFIED
);
438 range
.min
= range
.max
= exp
->saved_proto
;
439 range
.min_ip
= range
.max_ip
440 = ct
->master
->tuplehash
[!exp
->dir
].tuple
.src
.u3
.ip
;
441 /* hook doesn't matter, but it has to do destination manip */
442 nf_nat_setup_info(ct
, &range
, NF_IP_PRE_ROUTING
);
444 EXPORT_SYMBOL(nf_nat_follow_master
);