net/bridge/br_netfilter.c
/*
 * Handle firewalling
 * Linux ethernet bridge
 *
 * Authors:
 * Lennert Buytenhek		<buytenh@gnu.org>
 * Bart De Schuymer		<bdschuym@pandora.be>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define skb_origaddr(skb)        (((struct bridge_skb_cb *) \
                                  (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)  (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)     (skb_origaddr(skb) != ip_hdr(skb)->daddr)

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
static int brnf_pass_vlan_indev __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#define brnf_pass_vlan_indev 0
#endif

#define IS_IP(skb) \
        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

static inline __be16 vlan_proto(const struct sk_buff *skb)
{
        if (vlan_tx_tag_present(skb))
                return skb->protocol;
        else if (skb->protocol == htons(ETH_P_8021Q))
                return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
        else
                return 0;
}

#define IS_VLAN_IP(skb) \
        (vlan_proto(skb) == htons(ETH_P_IP) && \
         brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
        (vlan_proto(skb) == htons(ETH_P_IPV6) && \
         brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
        (vlan_proto(skb) == htons(ETH_P_ARP) && \
         brnf_filter_vlan_tagged)

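/* For PPPoE session frames, return the PPP protocol field that follows the
 * PPPoE header (e.g. PPP_IP or PPP_IPV6). */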
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
        return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
                            sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
        (skb->protocol == htons(ETH_P_PPP_SES) && \
         pppoe_proto(skb) == htons(PPP_IP) && \
         brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
        (skb->protocol == htons(ETH_P_PPP_SES) && \
         pppoe_proto(skb) == htons(PPP_IPV6) && \
         brnf_filter_pppoe_tagged)

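/* Stub dst_entry operations backing the per-bridge fake routing table that is
 * set up in br_netfilter_rtable_init() below. */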
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
{
        return NULL;
}

static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
        return NULL;
}

static unsigned int fake_mtu(const struct dst_entry *dst)
{
        return dst->dev->mtu;
}

static struct dst_ops fake_dst_ops = {
        .family =       AF_INET,
        .protocol =     cpu_to_be16(ETH_P_IP),
        .update_pmtu =  fake_update_pmtu,
        .cow_metrics =  fake_cow_metrics,
        .neigh_lookup = fake_neigh_lookup,
        .mtu =          fake_mtu,
};

/*
 * Initialize bogus route table used to keep netfilter happy.
 * Currently, we fill in the PMTU entry because netfilter
 * refragmentation needs it, and the rt_flags entry because
 * ipt_REJECT needs it.  Future netfilter modules might
 * require us to fill additional fields.
 */
static const u32 br_dst_default_metrics[RTAX_MAX] = {
        [RTAX_MTU - 1] = 1500,
};

void br_netfilter_rtable_init(struct net_bridge *br)
{
        struct rtable *rt = &br->fake_rtable;

        atomic_set(&rt->dst.__refcnt, 1);
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
        rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
        rt->dst.ops = &fake_dst_ops;
}

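/* Return the fake routing table of the bridge that owns this port, or NULL if
 * the device is not a bridge port. */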
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
        struct net_bridge_port *port;

        port = br_port_get_rcu(dev);
        return port ? &port->br->fake_rtable : NULL;
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
        struct net_bridge_port *port;

        port = br_port_get_rcu(dev);
        return port ? port->br->dev : NULL;
}

static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
        skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
        if (likely(skb->nf_bridge))
                atomic_set(&(skb->nf_bridge->use), 1);

        return skb->nf_bridge;
}

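/* Make sure skb->nf_bridge is not shared: if it is, replace it with a private
 * copy so the caller can modify it safely. */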
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;

        if (atomic_read(&nf_bridge->use) > 1) {
                struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

                if (tmp) {
                        memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
                        atomic_set(&tmp->use, 1);
                }
                nf_bridge_put(nf_bridge);
                nf_bridge = tmp;
        }
        return nf_bridge;
}

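/* Add or remove the VLAN/PPPoE encapsulation header in front of the IP
 * header, keeping skb->network_header in sync. */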
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
        unsigned int len = nf_bridge_encap_header_len(skb);

        skb_push(skb, len);
        skb->network_header -= len;
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
        unsigned int len = nf_bridge_encap_header_len(skb);

        skb_pull(skb, len);
        skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
        unsigned int len = nf_bridge_encap_header_len(skb);

        skb_pull_rcsum(skb, len);
        skb->network_header += len;
}

static inline void nf_bridge_save_header(struct sk_buff *skb)
{
        int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

        skb_copy_from_linear_data_offset(skb, -header_size,
                                         skb->nf_bridge->data, header_size);
}

static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
        if (skb->nf_bridge->mask & BRNF_8021Q)
                skb->protocol = htons(ETH_P_8021Q);
        else if (skb->nf_bridge->mask & BRNF_PPPoE)
                skb->protocol = htons(ETH_P_PPP_SES);
}

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */

static int br_parse_ip_options(struct sk_buff *skb)
{
        struct ip_options *opt;
        const struct iphdr *iph;
        struct net_device *dev = skb->dev;
        u32 len;

        iph = ip_hdr(skb);
        opt = &(IPCB(skb)->opt);

        /* Basic sanity checks */
        if (iph->ihl < 5 || iph->version != 4)
                goto inhdr_error;

        if (!pskb_may_pull(skb, iph->ihl*4))
                goto inhdr_error;

        iph = ip_hdr(skb);
        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                goto inhdr_error;

        len = ntohs(iph->tot_len);
        if (skb->len < len) {
                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        } else if (len < (iph->ihl*4))
                goto inhdr_error;

        if (pskb_trim_rcsum(skb, len)) {
                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        if (iph->ihl == 5)
                return 0;

        opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
        if (ip_options_compile(dev_net(dev), opt, skb))
                goto inhdr_error;

        /* Check correct handling of SRR option */
        if (unlikely(opt->srr)) {
                struct in_device *in_dev = __in_dev_get_rcu(dev);
                if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
                        goto drop;

                if (ip_options_rcv_srr(skb))
                        goto drop;
        }

        return 0;

inhdr_error:
        IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
        return -1;
}

/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
        int err;
        unsigned int header_size;

        nf_bridge_update_protocol(skb);
        header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
        err = skb_cow_head(skb, header_size);
        if (err)
                return err;

        skb_copy_to_linear_data_offset(skb, -header_size,
                                       skb->nf_bridge->data, header_size);
        __skb_push(skb, nf_bridge_encap_header_len(skb));
        return 0;
}

/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct rtable *rt;

        if (nf_bridge->mask & BRNF_PKT_TYPE) {
                skb->pkt_type = PACKET_OTHERHOST;
                nf_bridge->mask ^= BRNF_PKT_TYPE;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

        rt = bridge_parent_rtable(nf_bridge->physindev);
        if (!rt) {
                kfree_skb(skb);
                return 0;
        }
        skb_dst_set_noref(skb, &rt->dst);

        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
        NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);

        return 0;
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct neighbour *neigh;
        struct dst_entry *dst;

        skb->dev = bridge_parent(skb->dev);
        if (!skb->dev)
                goto free_skb;
        dst = skb_dst(skb);
        neigh = dst_get_neighbour_noref(dst);
        if (neigh->hh.hh_len) {
                neigh_hh_bridge(&neigh->hh, skb);
                skb->dev = nf_bridge->physindev;
                return br_handle_frame_finish(skb);
        } else {
                /* the neighbour function below overwrites the complete
                 * MAC header, so we save the Ethernet source address and
                 * protocol number. */
                skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
                                                 skb->nf_bridge->data,
                                                 ETH_HLEN-ETH_ALEN);
                /* tell br_dev_xmit to continue with forwarding */
                nf_bridge->mask |= BRNF_BRIDGED_DNAT;
                return neigh->output(neigh, skb);
        }
free_skb:
        kfree_skb(skb);
        return 0;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct iphdr *iph = ip_hdr(skb);
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct rtable *rt;
        int err;

        if (nf_bridge->mask & BRNF_PKT_TYPE) {
                skb->pkt_type = PACKET_OTHERHOST;
                nf_bridge->mask ^= BRNF_PKT_TYPE;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
        if (dnat_took_place(skb)) {
                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                        struct in_device *in_dev = __in_dev_get_rcu(dev);

                        /* If err equals -EHOSTUNREACH the error is due to a
                         * martian destination or due to the fact that
                         * forwarding is disabled. For most martian packets,
                         * ip_route_output_key() will fail. It won't fail for 2 types of
                         * martian destinations: loopback destinations and destination
                         * 0.0.0.0. In both cases the packet will be dropped because the
                         * destination is the loopback device and not the bridge. */
                        if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
                                goto free_skb;

                        rt = ip_route_output(dev_net(dev), iph->daddr, 0,
                                             RT_TOS(iph->tos), 0);
                        if (!IS_ERR(rt)) {
                                /* - Bridged-and-DNAT'ed traffic doesn't
                                 *   require ip_forwarding. */
                                if (rt->dst.dev == dev) {
                                        skb_dst_set(skb, &rt->dst);
                                        goto bridged_dnat;
                                }
                                ip_rt_put(rt);
                        }
free_skb:
                        kfree_skb(skb);
                        return 0;
                } else {
                        if (skb_dst(skb)->dev == dev) {
bridged_dnat:
                                skb->dev = nf_bridge->physindev;
                                nf_bridge_update_protocol(skb);
                                nf_bridge_push_encap_header(skb);
                                NF_HOOK_THRESH(NFPROTO_BRIDGE,
                                               NF_BR_PRE_ROUTING,
                                               skb, skb->dev, NULL,
                                               br_nf_pre_routing_finish_bridge,
                                               1);
                                return 0;
                        }
                        memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
                        skb->pkt_type = PACKET_HOST;
                }
        } else {
                rt = bridge_parent_rtable(nf_bridge->physindev);
                if (!rt) {
                        kfree_skb(skb);
                        return 0;
                }
                skb_dst_set_noref(skb, &rt->dst);
        }

        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
        NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
                       br_handle_frame_finish, 1);

        return 0;
}

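/* Pick the device netfilter should see as indev: the VLAN device on top of
 * the bridge if bridge-nf-pass-vlan-input-dev is set and the frame is tagged,
 * otherwise the bridge device itself. */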
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
        struct net_device *vlan, *br;

        br = bridge_parent(dev);
        if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
                return br;

        vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK);

        return vlan ? vlan : br;
}

/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;

        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }

        nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
        nf_bridge->physindev = skb->dev;
        skb->dev = brnf_get_logical_dev(skb, skb->dev);
        if (skb->protocol == htons(ETH_P_8021Q))
                nf_bridge->mask |= BRNF_8021Q;
        else if (skb->protocol == htons(ETH_P_PPP_SES))
                nf_bridge->mask |= BRNF_PPPoE;

        return skb->dev;
}

/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
        unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
        u32 pkt_len;
        const unsigned char *nh = skb_network_header(skb);
        int off = raw - nh;
        int len = (raw[1] + 1) << 3;

        if ((raw + len) - skb->data > skb_headlen(skb))
                goto bad;

        off += 2;
        len -= 2;

        while (len > 0) {
                int optlen = nh[off + 1] + 2;

                switch (nh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;

                case IPV6_TLV_PADN:
                        break;

                case IPV6_TLV_JUMBO:
                        if (nh[off + 1] != 4 || (off & 3) != 2)
                                goto bad;
                        pkt_len = ntohl(*(__be32 *) (nh + off + 2));
                        if (pkt_len <= IPV6_MAXPLEN ||
                            ipv6_hdr(skb)->payload_len)
                                goto bad;
                        if (pkt_len > skb->len - sizeof(struct ipv6hdr))
                                goto bad;
                        if (pskb_trim_rcsum(skb,
                                            pkt_len + sizeof(struct ipv6hdr)))
                                goto bad;
                        nh = skb_network_header(skb);
                        break;
                default:
                        if (optlen > len)
                                goto bad;
                        break;
                }
                off += optlen;
                len -= optlen;
        }
        if (len == 0)
                return 0;
bad:
        return -1;

}

/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
                                           int (*okfn)(struct sk_buff *))
{
        const struct ipv6hdr *hdr;
        u32 pkt_len;

        if (skb->len < sizeof(struct ipv6hdr))
                return NF_DROP;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                return NF_DROP;

        hdr = ipv6_hdr(skb);

        if (hdr->version != 6)
                return NF_DROP;

        pkt_len = ntohs(hdr->payload_len);

        if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
                if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
                        return NF_DROP;
                if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
                        return NF_DROP;
        }
        if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
                return NF_DROP;

        nf_bridge_put(skb->nf_bridge);
        if (!nf_bridge_alloc(skb))
                return NF_DROP;
        if (!setup_pre_routing(skb))
                return NF_DROP;

        skb->protocol = htons(ETH_P_IPV6);
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);

        return NF_STOLEN;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
{
        struct net_bridge_port *p;
        struct net_bridge *br;
        __u32 len = nf_bridge_encap_header_len(skb);

        if (unlikely(!pskb_may_pull(skb, len)))
                return NF_DROP;

        p = br_port_get_rcu(in);
        if (p == NULL)
                return NF_DROP;
        br = p->br;

        if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
                if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
                        return NF_ACCEPT;

                nf_bridge_pull_encap_header_rcsum(skb);
                return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
        }

        if (!brnf_call_iptables && !br->nf_call_iptables)
                return NF_ACCEPT;

        if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
                return NF_ACCEPT;

        nf_bridge_pull_encap_header_rcsum(skb);

        if (br_parse_ip_options(skb))
                return NF_DROP;

        nf_bridge_put(skb->nf_bridge);
        if (!nf_bridge_alloc(skb))
                return NF_DROP;
        if (!setup_pre_routing(skb))
                return NF_DROP;
        store_orig_dstaddr(skb);
        skb->protocol = htons(ETH_P_IP);

        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
                br_nf_pre_routing_finish);

        return NF_STOLEN;
}


/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one.  On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
{
        br_drop_fake_rtable(skb);
        return NF_ACCEPT;
}

/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct net_device *in;

        if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
                in = nf_bridge->physindev;
                if (nf_bridge->mask & BRNF_PKT_TYPE) {
                        skb->pkt_type = PACKET_OTHERHOST;
                        nf_bridge->mask ^= BRNF_PKT_TYPE;
                }
                nf_bridge_update_protocol(skb);
        } else {
                in = *((struct net_device **)(skb->cb));
        }
        nf_bridge_push_encap_header(skb);

        NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
                       skb->dev, br_forward_finish, 1);
        return 0;
}


/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
{
        struct nf_bridge_info *nf_bridge;
        struct net_device *parent;
        u_int8_t pf;

        if (!skb->nf_bridge)
                return NF_ACCEPT;

        /* Need exclusive nf_bridge_info since we might have multiple
         * different physoutdevs. */
        if (!nf_bridge_unshare(skb))
                return NF_DROP;

        parent = bridge_parent(out);
        if (!parent)
                return NF_DROP;

        if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                pf = PF_INET;
        else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                pf = PF_INET6;
        else
                return NF_ACCEPT;

        nf_bridge_pull_encap_header(skb);

        nf_bridge = skb->nf_bridge;
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }

        if (pf == PF_INET && br_parse_ip_options(skb))
                return NF_DROP;

        /* The physdev module checks on this */
        nf_bridge->mask |= BRNF_BRIDGED;
        nf_bridge->physoutdev = skb->dev;
        if (pf == PF_INET)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);

        NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
                br_nf_forward_finish);

        return NF_STOLEN;
}

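/* Pass bridged ARP traffic (ARP for IPv4 addresses only) to the arptables
 * FORWARD chain.  The original input port is stashed in skb->cb for
 * br_nf_forward_finish(). */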
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
{
        struct net_bridge_port *p;
        struct net_bridge *br;
        struct net_device **d = (struct net_device **)(skb->cb);

        p = br_port_get_rcu(out);
        if (p == NULL)
                return NF_ACCEPT;
        br = p->br;

        if (!brnf_call_arptables && !br->nf_call_arptables)
                return NF_ACCEPT;

        if (!IS_ARP(skb)) {
                if (!IS_VLAN_ARP(skb))
                        return NF_ACCEPT;
                nf_bridge_pull_encap_header(skb);
        }

        if (arp_hdr(skb)->ar_pln != 4) {
                if (IS_VLAN_ARP(skb))
                        nf_bridge_push_encap_header(skb);
                return NF_ACCEPT;
        }
        *d = (struct net_device *)in;
        NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
                (struct net_device *)out, br_nf_forward_finish);

        return NF_STOLEN;
}

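/* For connection-tracked IPv4 packets that exceed the outgoing device's MTU
 * (typically because conntrack defragmented them) and are not GSO, refragment
 * before transmission. */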
#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
        int ret;

        if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb)) {
                if (br_parse_ip_options(skb))
                        /* Drop invalid packet */
                        return NF_DROP;
                ret = ip_fragment(skb, br_dev_queue_push_xmit);
        } else
                ret = br_dev_queue_push_xmit(skb);

        return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
        return br_dev_queue_push_xmit(skb);
}
#endif

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
                                       int (*okfn)(struct sk_buff *))
{
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct net_device *realoutdev = bridge_parent(skb->dev);
        u_int8_t pf;

        if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
                return NF_ACCEPT;

        if (!realoutdev)
                return NF_DROP;

        if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                pf = PF_INET;
        else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                pf = PF_INET6;
        else
                return NF_ACCEPT;

        /* We assume any code from br_dev_queue_push_xmit onwards doesn't care
         * about the value of skb->pkt_type. */
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }

        nf_bridge_pull_encap_header(skb);
        nf_bridge_save_header(skb);
        if (pf == PF_INET)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);

        NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
                br_nf_dev_queue_xmit);

        return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
{
        if (skb->nf_bridge &&
            !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
                return NF_STOP;
        }

        return NF_ACCEPT;
}

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
        {
                .hook = br_nf_pre_routing,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_PRE_ROUTING,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_local_in,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_LOCAL_IN,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_forward_ip,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_FORWARD,
                .priority = NF_BR_PRI_BRNF - 1,
        },
        {
                .hook = br_nf_forward_arp,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_FORWARD,
                .priority = NF_BR_PRI_BRNF,
        },
        {
                .hook = br_nf_post_routing,
                .owner = THIS_MODULE,
                .pf = PF_BRIDGE,
                .hooknum = NF_BR_POST_ROUTING,
                .priority = NF_BR_PRI_LAST,
        },
        {
                .hook = ip_sabotage_in,
                .owner = THIS_MODULE,
                .pf = PF_INET,
                .hooknum = NF_INET_PRE_ROUTING,
                .priority = NF_IP_PRI_FIRST,
        },
        {
                .hook = ip_sabotage_in,
                .owner = THIS_MODULE,
                .pf = PF_INET6,
                .hooknum = NF_INET_PRE_ROUTING,
                .priority = NF_IP6_PRI_FIRST,
        },
};

#ifdef CONFIG_SYSCTL
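/* sysctl handler shared by all bridge-nf-* entries: any non-zero value
 * written is normalized to 1. */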
static
int brnf_sysctl_call_tables(ctl_table *ctl, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

        if (write && *(int *)(ctl->data))
                *(int *)(ctl->data) = 1;
        return ret;
}

static ctl_table brnf_table[] = {
        {
                .procname       = "bridge-nf-call-arptables",
                .data           = &brnf_call_arptables,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-call-iptables",
                .data           = &brnf_call_iptables,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-call-ip6tables",
                .data           = &brnf_call_ip6tables,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-filter-vlan-tagged",
                .data           = &brnf_filter_vlan_tagged,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-filter-pppoe-tagged",
                .data           = &brnf_filter_pppoe_tagged,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        {
                .procname       = "bridge-nf-pass-vlan-input-dev",
                .data           = &brnf_pass_vlan_indev,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = brnf_sysctl_call_tables,
        },
        { }
};
#endif

int __init br_netfilter_init(void)
{
        int ret;

        ret = dst_entries_init(&fake_dst_ops);
        if (ret < 0)
                return ret;

        ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
        if (ret < 0) {
                dst_entries_destroy(&fake_dst_ops);
                return ret;
        }
#ifdef CONFIG_SYSCTL
        brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
        if (brnf_sysctl_header == NULL) {
                printk(KERN_WARNING
                       "br_netfilter: can't register to sysctl.\n");
                nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
                dst_entries_destroy(&fake_dst_ops);
                return -ENOMEM;
        }
#endif
        printk(KERN_NOTICE "Bridge firewalling registered\n");
        return 0;
}

void br_netfilter_fini(void)
{
        nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
        unregister_net_sysctl_table(brnf_sysctl_header);
#endif
        dst_entries_destroy(&fake_dst_ops);
}