/*
 * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/nf_tables_bridge.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include "../br_private.h"
27 static void nft_reject_br_push_etherhdr(struct sk_buff
*oldskb
,
32 eth
= (struct ethhdr
*)skb_push(nskb
, ETH_HLEN
);
33 skb_reset_mac_header(nskb
);
34 ether_addr_copy(eth
->h_source
, eth_hdr(oldskb
)->h_dest
);
35 ether_addr_copy(eth
->h_dest
, eth_hdr(oldskb
)->h_source
);
36 eth
->h_proto
= eth_hdr(oldskb
)->h_proto
;
37 skb_pull(nskb
, ETH_HLEN
);
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
 * or the bridge port (NF_BRIDGE PREROUTING).
 */
43 static void nft_reject_br_send_v4_tcp_reset(struct net
*net
,
44 struct sk_buff
*oldskb
,
45 const struct net_device
*dev
,
50 const struct tcphdr
*oth
;
53 if (!nft_bridge_iphdr_validate(oldskb
))
56 oth
= nf_reject_ip_tcphdr_get(oldskb
, &_oth
, hook
);
60 nskb
= alloc_skb(sizeof(struct iphdr
) + sizeof(struct tcphdr
) +
61 LL_MAX_HEADER
, GFP_ATOMIC
);
65 skb_reserve(nskb
, LL_MAX_HEADER
);
66 niph
= nf_reject_iphdr_put(nskb
, oldskb
, IPPROTO_TCP
,
67 net
->ipv4
.sysctl_ip_default_ttl
);
68 nf_reject_ip_tcphdr_put(nskb
, oldskb
, oth
);
69 niph
->ttl
= net
->ipv4
.sysctl_ip_default_ttl
;
70 niph
->tot_len
= htons(nskb
->len
);
73 nft_reject_br_push_etherhdr(oldskb
, nskb
);
75 br_forward(br_port_get_rcu(dev
), nskb
, false, true);
78 static void nft_reject_br_send_v4_unreach(struct net
*net
,
79 struct sk_buff
*oldskb
,
80 const struct net_device
*dev
,
85 struct icmphdr
*icmph
;
91 if (oldskb
->csum_bad
|| !nft_bridge_iphdr_validate(oldskb
))
94 /* IP header checks: fragment. */
95 if (ip_hdr(oldskb
)->frag_off
& htons(IP_OFFSET
))
98 /* RFC says return as much as we can without exceeding 576 bytes. */
99 len
= min_t(unsigned int, 536, oldskb
->len
);
101 if (!pskb_may_pull(oldskb
, len
))
104 if (pskb_trim_rcsum(oldskb
, ntohs(ip_hdr(oldskb
)->tot_len
)))
107 if (ip_hdr(oldskb
)->protocol
== IPPROTO_TCP
||
108 ip_hdr(oldskb
)->protocol
== IPPROTO_UDP
)
109 proto
= ip_hdr(oldskb
)->protocol
;
113 if (!skb_csum_unnecessary(oldskb
) &&
114 nf_ip_checksum(oldskb
, hook
, ip_hdrlen(oldskb
), proto
))
117 nskb
= alloc_skb(sizeof(struct iphdr
) + sizeof(struct icmphdr
) +
118 LL_MAX_HEADER
+ len
, GFP_ATOMIC
);
122 skb_reserve(nskb
, LL_MAX_HEADER
);
123 niph
= nf_reject_iphdr_put(nskb
, oldskb
, IPPROTO_ICMP
,
124 net
->ipv4
.sysctl_ip_default_ttl
);
126 skb_reset_transport_header(nskb
);
127 icmph
= (struct icmphdr
*)skb_put(nskb
, sizeof(struct icmphdr
));
128 memset(icmph
, 0, sizeof(*icmph
));
129 icmph
->type
= ICMP_DEST_UNREACH
;
132 payload
= skb_put(nskb
, len
);
133 memcpy(payload
, skb_network_header(oldskb
), len
);
135 csum
= csum_partial((void *)icmph
, len
+ sizeof(struct icmphdr
), 0);
136 icmph
->checksum
= csum_fold(csum
);
138 niph
->tot_len
= htons(nskb
->len
);
141 nft_reject_br_push_etherhdr(oldskb
, nskb
);
143 br_forward(br_port_get_rcu(dev
), nskb
, false, true);
146 static void nft_reject_br_send_v6_tcp_reset(struct net
*net
,
147 struct sk_buff
*oldskb
,
148 const struct net_device
*dev
,
151 struct sk_buff
*nskb
;
152 const struct tcphdr
*oth
;
154 unsigned int otcplen
;
155 struct ipv6hdr
*nip6h
;
157 if (!nft_bridge_ip6hdr_validate(oldskb
))
160 oth
= nf_reject_ip6_tcphdr_get(oldskb
, &_oth
, &otcplen
, hook
);
164 nskb
= alloc_skb(sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
) +
165 LL_MAX_HEADER
, GFP_ATOMIC
);
169 skb_reserve(nskb
, LL_MAX_HEADER
);
170 nip6h
= nf_reject_ip6hdr_put(nskb
, oldskb
, IPPROTO_TCP
,
171 net
->ipv6
.devconf_all
->hop_limit
);
172 nf_reject_ip6_tcphdr_put(nskb
, oldskb
, oth
, otcplen
);
173 nip6h
->payload_len
= htons(nskb
->len
- sizeof(struct ipv6hdr
));
175 nft_reject_br_push_etherhdr(oldskb
, nskb
);
177 br_forward(br_port_get_rcu(dev
), nskb
, false, true);
180 static bool reject6_br_csum_ok(struct sk_buff
*skb
, int hook
)
182 const struct ipv6hdr
*ip6h
= ipv6_hdr(skb
);
185 u8 proto
= ip6h
->nexthdr
;
190 if (skb_csum_unnecessary(skb
))
193 if (ip6h
->payload_len
&&
194 pskb_trim_rcsum(skb
, ntohs(ip6h
->payload_len
) + sizeof(*ip6h
)))
197 thoff
= ipv6_skip_exthdr(skb
, ((u8
*)(ip6h
+1) - skb
->data
), &proto
, &fo
);
198 if (thoff
< 0 || thoff
>= skb
->len
|| (fo
& htons(~0x7)) != 0)
201 return nf_ip6_checksum(skb
, hook
, thoff
, proto
) == 0;
204 static void nft_reject_br_send_v6_unreach(struct net
*net
,
205 struct sk_buff
*oldskb
,
206 const struct net_device
*dev
,
209 struct sk_buff
*nskb
;
210 struct ipv6hdr
*nip6h
;
211 struct icmp6hdr
*icmp6h
;
215 if (!nft_bridge_ip6hdr_validate(oldskb
))
218 /* Include "As much of invoking packet as possible without the ICMPv6
219 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
221 len
= min_t(unsigned int, 1220, oldskb
->len
);
223 if (!pskb_may_pull(oldskb
, len
))
226 if (!reject6_br_csum_ok(oldskb
, hook
))
229 nskb
= alloc_skb(sizeof(struct iphdr
) + sizeof(struct icmp6hdr
) +
230 LL_MAX_HEADER
+ len
, GFP_ATOMIC
);
234 skb_reserve(nskb
, LL_MAX_HEADER
);
235 nip6h
= nf_reject_ip6hdr_put(nskb
, oldskb
, IPPROTO_ICMPV6
,
236 net
->ipv6
.devconf_all
->hop_limit
);
238 skb_reset_transport_header(nskb
);
239 icmp6h
= (struct icmp6hdr
*)skb_put(nskb
, sizeof(struct icmp6hdr
));
240 memset(icmp6h
, 0, sizeof(*icmp6h
));
241 icmp6h
->icmp6_type
= ICMPV6_DEST_UNREACH
;
242 icmp6h
->icmp6_code
= code
;
244 payload
= skb_put(nskb
, len
);
245 memcpy(payload
, skb_network_header(oldskb
), len
);
246 nip6h
->payload_len
= htons(nskb
->len
- sizeof(struct ipv6hdr
));
248 icmp6h
->icmp6_cksum
=
249 csum_ipv6_magic(&nip6h
->saddr
, &nip6h
->daddr
,
250 nskb
->len
- sizeof(struct ipv6hdr
),
253 nskb
->len
- sizeof(struct ipv6hdr
),
256 nft_reject_br_push_etherhdr(oldskb
, nskb
);
258 br_forward(br_port_get_rcu(dev
), nskb
, false, true);
261 static void nft_reject_bridge_eval(const struct nft_expr
*expr
,
262 struct nft_regs
*regs
,
263 const struct nft_pktinfo
*pkt
)
265 struct nft_reject
*priv
= nft_expr_priv(expr
);
266 const unsigned char *dest
= eth_hdr(pkt
->skb
)->h_dest
;
268 if (is_broadcast_ether_addr(dest
) ||
269 is_multicast_ether_addr(dest
))
272 switch (eth_hdr(pkt
->skb
)->h_proto
) {
273 case htons(ETH_P_IP
):
274 switch (priv
->type
) {
275 case NFT_REJECT_ICMP_UNREACH
:
276 nft_reject_br_send_v4_unreach(pkt
->net
, pkt
->skb
,
280 case NFT_REJECT_TCP_RST
:
281 nft_reject_br_send_v4_tcp_reset(pkt
->net
, pkt
->skb
,
284 case NFT_REJECT_ICMPX_UNREACH
:
285 nft_reject_br_send_v4_unreach(pkt
->net
, pkt
->skb
,
287 nft_reject_icmp_code(priv
->icmp_code
));
291 case htons(ETH_P_IPV6
):
292 switch (priv
->type
) {
293 case NFT_REJECT_ICMP_UNREACH
:
294 nft_reject_br_send_v6_unreach(pkt
->net
, pkt
->skb
,
298 case NFT_REJECT_TCP_RST
:
299 nft_reject_br_send_v6_tcp_reset(pkt
->net
, pkt
->skb
,
302 case NFT_REJECT_ICMPX_UNREACH
:
303 nft_reject_br_send_v6_unreach(pkt
->net
, pkt
->skb
,
305 nft_reject_icmpv6_code(priv
->icmp_code
));
310 /* No explicit way to reject this protocol, drop it. */
314 regs
->verdict
.code
= NF_DROP
;
317 static int nft_reject_bridge_validate(const struct nft_ctx
*ctx
,
318 const struct nft_expr
*expr
,
319 const struct nft_data
**data
)
321 return nft_chain_validate_hooks(ctx
->chain
, (1 << NF_BR_PRE_ROUTING
) |
322 (1 << NF_BR_LOCAL_IN
));
325 static int nft_reject_bridge_init(const struct nft_ctx
*ctx
,
326 const struct nft_expr
*expr
,
327 const struct nlattr
* const tb
[])
329 struct nft_reject
*priv
= nft_expr_priv(expr
);
332 err
= nft_reject_bridge_validate(ctx
, expr
, NULL
);
336 if (tb
[NFTA_REJECT_TYPE
] == NULL
)
339 priv
->type
= ntohl(nla_get_be32(tb
[NFTA_REJECT_TYPE
]));
340 switch (priv
->type
) {
341 case NFT_REJECT_ICMP_UNREACH
:
342 case NFT_REJECT_ICMPX_UNREACH
:
343 if (tb
[NFTA_REJECT_ICMP_CODE
] == NULL
)
346 icmp_code
= nla_get_u8(tb
[NFTA_REJECT_ICMP_CODE
]);
347 if (priv
->type
== NFT_REJECT_ICMPX_UNREACH
&&
348 icmp_code
> NFT_REJECT_ICMPX_MAX
)
351 priv
->icmp_code
= icmp_code
;
353 case NFT_REJECT_TCP_RST
:
361 static int nft_reject_bridge_dump(struct sk_buff
*skb
,
362 const struct nft_expr
*expr
)
364 const struct nft_reject
*priv
= nft_expr_priv(expr
);
366 if (nla_put_be32(skb
, NFTA_REJECT_TYPE
, htonl(priv
->type
)))
367 goto nla_put_failure
;
369 switch (priv
->type
) {
370 case NFT_REJECT_ICMP_UNREACH
:
371 case NFT_REJECT_ICMPX_UNREACH
:
372 if (nla_put_u8(skb
, NFTA_REJECT_ICMP_CODE
, priv
->icmp_code
))
373 goto nla_put_failure
;
385 static struct nft_expr_type nft_reject_bridge_type
;
386 static const struct nft_expr_ops nft_reject_bridge_ops
= {
387 .type
= &nft_reject_bridge_type
,
388 .size
= NFT_EXPR_SIZE(sizeof(struct nft_reject
)),
389 .eval
= nft_reject_bridge_eval
,
390 .init
= nft_reject_bridge_init
,
391 .dump
= nft_reject_bridge_dump
,
392 .validate
= nft_reject_bridge_validate
,
395 static struct nft_expr_type nft_reject_bridge_type __read_mostly
= {
396 .family
= NFPROTO_BRIDGE
,
398 .ops
= &nft_reject_bridge_ops
,
399 .policy
= nft_reject_policy
,
400 .maxattr
= NFTA_REJECT_MAX
,
401 .owner
= THIS_MODULE
,
404 static int __init
nft_reject_bridge_module_init(void)
406 return nft_register_expr(&nft_reject_bridge_type
);
409 static void __exit
nft_reject_bridge_module_exit(void)
411 nft_unregister_expr(&nft_reject_bridge_type
);
414 module_init(nft_reject_bridge_module_init
);
415 module_exit(nft_reject_bridge_module_exit
);
417 MODULE_LICENSE("GPL");
418 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
419 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE
, "reject");