net/bridge/br_netfilter.c
/*
 * Handle firewalling
 * Linux ethernet bridge
 *
 * Authors:
 * Lennert Buytenhek <buytenh@gnu.org>
 * Bart De Schuymer <bdschuym@pandora.be>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <net/netfilter/br_netfilter.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
static int brnf_pass_vlan_indev __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#define brnf_pass_vlan_indev 0
#endif

#define IS_IP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)

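/* Return the PPP protocol number that follows the PPPoE session header,
 * read directly from the MAC header area of the skb. */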
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)

/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)

#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
struct brnf_frag_data {
	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
	u8 encap_size;
	u8 size;
};

static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
#endif

static struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb)
{
	return skb->nf_bridge;
}

static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? &port->br->fake_rtable : NULL;
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
	if (likely(skb->nf_bridge))
		atomic_set(&(skb->nf_bridge->use), 1);

	return skb->nf_bridge;
}

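/* Make sure this skb owns its nf_bridge_info exclusively: if the state is
 * shared with another skb, replace it with a private copy so later writes
 * (e.g. physoutdev) don't leak into the other user. */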
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}
	return nf_bridge;
}

static unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __cpu_to_be16(ETH_P_8021Q):
		return VLAN_HLEN;
	case __cpu_to_be16(ETH_P_PPP_SES):
		return PPPOE_SES_HLEN;
	default:
		return 0;
	}
}

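/* Helpers to temporarily strip or restore the VLAN/PPPoE encapsulation so
 * the IP hooks see a plain IP packet, keeping skb->network_header in sync
 * with the data pointer. */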
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_push(skb, len);
	skb->network_header -= len;
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */

static int br_parse_ip_options(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}

static void nf_bridge_update_protocol(struct sk_buff *skb)
{
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}

/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	rt = bridge_parent_rtable(nf_bridge->physindev);
	if (!rt) {
		kfree_skb(skb);
		return 0;
	}
	skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
		int ret;

		if (neigh->hh.hh_len) {
			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = nf_bridge->physindev;
			ret = br_handle_frame_finish(skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN-ETH_ALEN),
							 nf_bridge->neigh_header,
							 ETH_HLEN-ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->mask |= BRNF_BRIDGED_DNAT;
			/* FIXME Need to refragment */
			ret = neigh->output(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

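/* Use the conntrack entry attached to the skb to decide whether DNAT has
 * rewritten the destination address of this packet. */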
static bool dnat_took_place(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || nf_ct_is_untracked(ct))
		return false;

	return test_bit(IPS_DST_NAT_BIT, &ct->status);
#else
	return false;
#endif
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;
	int frag_max_size;

	frag_max_size = IPCB(skb)->frag_max_size;
	BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

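/* Return the bridge device itself or, when bridge-nf-pass-vlan-input-dev is
 * enabled and the frame carries a VLAN tag, the VLAN device stacked on top
 * of the bridge. */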
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
	struct net_device *vlan, *br;

	br = bridge_parent(dev);
	if (brnf_pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
					skb_vlan_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}

/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
	nf_bridge->physindev = skb->dev;
	skb->dev = brnf_get_logical_dev(skb, skb->dev);
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->mask |= BRNF_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->mask |= BRNF_PPPoE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}

/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	u32 pkt_len;
	const unsigned char *nh = skb_network_header(skb);
	int off = raw - nh;
	int len = (raw[1] + 1) << 3;

	if ((raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		case IPV6_TLV_JUMBO:
			if (nh[off + 1] != 4 || (off & 3) != 2)
				goto bad;
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
				goto bad;
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
				goto bad;
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
				goto bad;
			nh = skb_network_header(skb);
			break;
		default:
			if (optlen > len)
				goto bad;
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 0;
bad:
	return -1;

}

/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	const struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->len < sizeof(struct ipv6hdr))
		return NF_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return NF_DROP;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		return NF_DROP;

	pkt_len = ntohs(hdr->payload_len);

	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			return NF_DROP;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			return NF_DROP;
	}
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);

	return NF_STOLEN;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular. Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}


/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one. On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	br_drop_fake_rtable(skb);
	return NF_ACCEPT;
}

/* PF_BRIDGE/FORWARD *************************************************/
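/* Completion handler for the FORWARD hooks: restore the bridge view of the
 * packet (pkt_type, encapsulation, protocol) and re-enter the bridge
 * FORWARD hook just after the netfilter callback. */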
static int br_nf_forward_finish(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *in;

	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
		int frag_max_size;

		if (skb->protocol == htons(ETH_P_IP)) {
			frag_max_size = IPCB(skb)->frag_max_size;
			BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
		}

		in = nf_bridge->physindev;
		if (nf_bridge->mask & BRNF_PKT_TYPE) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->mask ^= BRNF_PKT_TYPE;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
		       skb->dev, br_forward_finish, 1);
	return 0;
}


/* This is the 'purely bridged' case. For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
				     struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_DROP;

	parent = bridge_parent(out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	if (pf == NFPROTO_IPV4) {
		int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;

		if (br_parse_ip_options(skb))
			return NF_DROP;

		IPCB(skb)->frag_max_size = frag_max;
	}

	nf_bridge->physoutdev = skb->dev;
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
		br_nf_forward_finish);

	return NF_STOLEN;
}

static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}

#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
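/* Restore the saved MAC (and possible VLAN/PPPoE) header in front of each
 * IP fragment before handing it to the bridge transmit path. */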
static int br_nf_push_frag_xmit(struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	return br_dev_queue_push_xmit(skb);
}

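/* Transmit path for bridged IPv4 traffic: packets that grew beyond the
 * outgoing device MTU (e.g. because they were defragmented earlier) are
 * refragmented here; everything else is queued directly. */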
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;
	int frag_max_size;
	unsigned int mtu_reserved;

	if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
		return br_dev_queue_push_xmit(skb);

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (skb->len + mtu_reserved > skb->dev->mtu) {
		struct brnf_frag_data *data;

		frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		IPCB(skb)->frag_max_size = frag_max_size;

		nf_bridge_update_protocol(skb);

		data = this_cpu_ptr(&brnf_frag_data_storage);
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		ret = ip_fragment(skb, br_nf_push_frag_xmit);
	} else {
		ret = br_dev_queue_push_xmit(skb);
	}

	return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
				       struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
	 * on a bridge, but was delivered locally and is now being routed:
	 *
	 * POST_ROUTING was already invoked from the ip stack.
	 */
	if (!nf_bridge || !nf_bridge->physoutdev)
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge_pull_encap_header(skb);
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	if (skb->nf_bridge &&
	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
		return NF_STOP;
	}

	return NF_ACCEPT;
}

/* This is called when br_netfilter has called into iptables/netfilter,
 * and DNAT has taken place on a bridge-forwarded packet.
 *
 * neigh->output has created a new MAC header, with local br0 MAC
 * as saddr.
 *
 * This restores the original MAC saddr of the bridged packet
 * before invoking bridge forward logic to transmit the packet.
 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	skb_pull(skb, ETH_HLEN);
	nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = nf_bridge->physindev;
	br_handle_frame_finish(skb);
}

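/* Hook called from br_dev_xmit(): if DNAT happened while the packet was
 * bridged, finish delivery via the slow path above and tell the caller the
 * skb has been consumed. */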
static int br_nf_dev_xmit(struct sk_buff *skb)
{
	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return 1;
	}
	return 0;
}

static const struct nf_br_ops br_ops = {
	.br_dev_xmit_hook = br_nf_dev_xmit,
};

void br_netfilter_enable(void)
{
}
EXPORT_SYMBOL_GPL(br_netfilter_enable);

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{
		.hook = br_nf_pre_routing,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_local_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};

#ifdef CONFIG_SYSCTL
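/* sysctl handler shared by all bridge-nf-* entries: any nonzero value
 * written by the user is normalised to 1. */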
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

static struct ctl_table brnf_table[] = {
	{
		.procname = "bridge-nf-call-arptables",
		.data = &brnf_call_arptables,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-call-iptables",
		.data = &brnf_call_iptables,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-call-ip6tables",
		.data = &brnf_call_ip6tables,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-filter-vlan-tagged",
		.data = &brnf_filter_vlan_tagged,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-filter-pppoe-tagged",
		.data = &brnf_filter_pppoe_tagged,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-pass-vlan-input-dev",
		.data = &brnf_pass_vlan_indev,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{ }
};
#endif

static int __init br_netfilter_init(void)
{
	int ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0)
		return ret;

#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
		return -ENOMEM;
	}
#endif
	RCU_INIT_POINTER(nf_br_ops, &br_ops);
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}

static void __exit br_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_br_ops, NULL);
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
	unregister_net_sysctl_table(brnf_sysctl_header);
#endif
}

module_init(br_netfilter_init);
module_exit(br_netfilter_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");