Merge tag 'imx-clk-fixes-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/shawng...
[deliverable/linux.git] / net / bridge / netfilter / nft_reject_bridge.c
1 /*
2 * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/module.h>
12 #include <linux/netlink.h>
13 #include <linux/netfilter.h>
14 #include <linux/netfilter/nf_tables.h>
15 #include <net/netfilter/nf_tables.h>
16 #include <net/netfilter/nft_reject.h>
17 #include <net/netfilter/nf_tables_bridge.h>
18 #include <net/netfilter/ipv4/nf_reject.h>
19 #include <net/netfilter/ipv6/nf_reject.h>
20 #include <linux/ip.h>
21 #include <net/ip.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/netfilter_bridge.h>
24 #include <linux/netfilter_ipv6.h>
25 #include "../br_private.h"
26
27 static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
28 struct sk_buff *nskb)
29 {
30 struct ethhdr *eth;
31
32 eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
33 skb_reset_mac_header(nskb);
34 ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
35 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
36 eth->h_proto = eth_hdr(oldskb)->h_proto;
37 skb_pull(nskb, ETH_HLEN);
38 }
39
40 /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
41 * or the bridge port (NF_BRIDGE PREROUTING).
42 */
43 static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
44 const struct net_device *dev,
45 int hook)
46 {
47 struct sk_buff *nskb;
48 struct iphdr *niph;
49 const struct tcphdr *oth;
50 struct tcphdr _oth;
51 struct net *net = sock_net(oldskb->sk);
52
53 if (!nft_bridge_iphdr_validate(oldskb))
54 return;
55
56 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
57 if (!oth)
58 return;
59
60 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
61 LL_MAX_HEADER, GFP_ATOMIC);
62 if (!nskb)
63 return;
64
65 skb_reserve(nskb, LL_MAX_HEADER);
66 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
67 net->ipv4.sysctl_ip_default_ttl);
68 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
69 niph->ttl = net->ipv4.sysctl_ip_default_ttl;
70 niph->tot_len = htons(nskb->len);
71 ip_send_check(niph);
72
73 nft_reject_br_push_etherhdr(oldskb, nskb);
74
75 br_deliver(br_port_get_rcu(dev), nskb);
76 }
77
/* Send an ICMPv4 "destination unreachable" (type 3, code @code) in reply
 * to a rejected IPv4 packet seen on the bridge.  A fresh skb is built and
 * delivered directly out of the bridge port @dev.
 */
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	void *payload;
	__wsum csum;
	u8 proto;
	struct net *net = sock_net(oldskb->sk);

	/* Never reply to packets with a bad hw checksum or a malformed
	 * IPv4 header.
	 */
	if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
		return;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Trim any link-layer padding beyond the IP total length so the
	 * checksum verification below covers exactly the IP datagram.
	 */
	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return;

	/* nf_ip_checksum() only knows how to verify TCP/UDP; for anything
	 * else pass proto 0 (no pseudo-header checksum check).
	 */
	if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
	    ip_hdr(oldskb)->protocol == IPPROTO_UDP)
		proto = ip_hdr(oldskb)->protocol;
	else
		proto = 0;

	/* Refuse to reply when the transport checksum is broken, so forged
	 * garbage cannot trigger ICMP replies.
	 */
	if (!skb_csum_unnecessary(oldskb) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   net->ipv4.sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	memset(icmph, 0, sizeof(*icmph));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	/* Quote the head of the offending packet in the ICMP payload. */
	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);

	/* ICMP checksum covers the ICMP header plus the quoted payload. */
	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(dev), nskb);
}
145
/* Send a TCP RST in reply to a rejected IPv6 TCP segment seen on the
 * bridge.  A fresh skb is built and delivered directly out of the bridge
 * port @dev.
 */
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	const struct tcphdr *oth;
	struct tcphdr _oth;
	unsigned int otcplen;
	struct ipv6hdr *nip6h;

	/* Bail out on malformed IPv6 headers. */
	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* No RST in reply to a RST, and no reply without a TCP header. */
	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				     net->ipv6.devconf_all->hop_limit);
	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
	/* Fix up payload_len now that the TCP header has been appended. */
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(dev), nskb);
}
179
/* Return true if the IPv6 packet's transport checksum is valid (or not
 * required), so a reject reply may safely be generated for it.
 */
static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	int thoff;
	__be16 fo;
	u8 proto = ip6h->nexthdr;

	if (skb->csum_bad)
		return false;

	if (skb_csum_unnecessary(skb))
		return true;

	/* Trim link-layer padding beyond the IPv6 payload length; a
	 * zero payload_len (jumbogram) is left alone.
	 */
	if (ip6h->payload_len &&
	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
		return false;

	/* Locate the transport header past any extension headers; reject
	 * fragments with a non-zero offset (fo) — their transport checksum
	 * cannot be verified here.
	 */
	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
		return false;

	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
203
204 static void nft_reject_br_send_v6_unreach(struct net *net,
205 struct sk_buff *oldskb,
206 const struct net_device *dev,
207 int hook, u8 code)
208 {
209 struct sk_buff *nskb;
210 struct ipv6hdr *nip6h;
211 struct icmp6hdr *icmp6h;
212 unsigned int len;
213 void *payload;
214
215 if (!nft_bridge_ip6hdr_validate(oldskb))
216 return;
217
218 /* Include "As much of invoking packet as possible without the ICMPv6
219 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
220 */
221 len = min_t(unsigned int, 1220, oldskb->len);
222
223 if (!pskb_may_pull(oldskb, len))
224 return;
225
226 if (!reject6_br_csum_ok(oldskb, hook))
227 return;
228
229 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
230 LL_MAX_HEADER + len, GFP_ATOMIC);
231 if (!nskb)
232 return;
233
234 skb_reserve(nskb, LL_MAX_HEADER);
235 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
236 net->ipv6.devconf_all->hop_limit);
237
238 skb_reset_transport_header(nskb);
239 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
240 memset(icmp6h, 0, sizeof(*icmp6h));
241 icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
242 icmp6h->icmp6_code = code;
243
244 payload = skb_put(nskb, len);
245 memcpy(payload, skb_network_header(oldskb), len);
246 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
247
248 icmp6h->icmp6_cksum =
249 csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
250 nskb->len - sizeof(struct ipv6hdr),
251 IPPROTO_ICMPV6,
252 csum_partial(icmp6h,
253 nskb->len - sizeof(struct ipv6hdr),
254 0));
255
256 nft_reject_br_push_etherhdr(oldskb, nskb);
257
258 br_deliver(br_port_get_rcu(dev), nskb);
259 }
260
/* Expression evaluation: emit the configured reject reply (ICMP unreach
 * or TCP RST) for the packet, then always drop it.  Broadcast/multicast
 * destinations and non-IP protocols get no reply, only the drop.
 */
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_regs *regs,
				   const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);
	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;

	/* Never generate replies to broadcast/multicast frames. */
	if (is_broadcast_ether_addr(dest) ||
	    is_multicast_ether_addr(dest))
		goto out;

	switch (eth_hdr(pkt->skb)->h_proto) {
	case htons(ETH_P_IP):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
						      pkt->hook,
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
							pkt->hook);
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Map the family-independent ICMPX code to the
			 * ICMPv4 code.
			 */
			nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
						      pkt->hook,
						      nft_reject_icmp_code(priv->icmp_code));
			break;
		}
		break;
	case htons(ETH_P_IPV6):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
						      pkt->in, pkt->hook,
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v6_tcp_reset(pkt->net, pkt->skb,
							pkt->in, pkt->hook);
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Map the family-independent ICMPX code to the
			 * ICMPv6 code.
			 */
			nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
						      pkt->in, pkt->hook,
						      nft_reject_icmpv6_code(priv->icmp_code));
			break;
		}
		break;
	default:
		/* No explicit way to reject this protocol, drop it. */
		break;
	}
out:
	regs->verdict.code = NF_DROP;
}
316
317 static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
318 const struct nft_expr *expr,
319 const struct nft_data **data)
320 {
321 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
322 (1 << NF_BR_LOCAL_IN));
323 }
324
325 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
326 const struct nft_expr *expr,
327 const struct nlattr * const tb[])
328 {
329 struct nft_reject *priv = nft_expr_priv(expr);
330 int icmp_code, err;
331
332 err = nft_reject_bridge_validate(ctx, expr, NULL);
333 if (err < 0)
334 return err;
335
336 if (tb[NFTA_REJECT_TYPE] == NULL)
337 return -EINVAL;
338
339 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
340 switch (priv->type) {
341 case NFT_REJECT_ICMP_UNREACH:
342 case NFT_REJECT_ICMPX_UNREACH:
343 if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
344 return -EINVAL;
345
346 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
347 if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
348 icmp_code > NFT_REJECT_ICMPX_MAX)
349 return -EINVAL;
350
351 priv->icmp_code = icmp_code;
352 break;
353 case NFT_REJECT_TCP_RST:
354 break;
355 default:
356 return -EINVAL;
357 }
358 return 0;
359 }
360
361 static int nft_reject_bridge_dump(struct sk_buff *skb,
362 const struct nft_expr *expr)
363 {
364 const struct nft_reject *priv = nft_expr_priv(expr);
365
366 if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
367 goto nla_put_failure;
368
369 switch (priv->type) {
370 case NFT_REJECT_ICMP_UNREACH:
371 case NFT_REJECT_ICMPX_UNREACH:
372 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
373 goto nla_put_failure;
374 break;
375 default:
376 break;
377 }
378
379 return 0;
380
381 nla_put_failure:
382 return -1;
383 }
384
static struct nft_expr_type nft_reject_bridge_type;

/* Expression operations; instance data is the generic struct nft_reject. */
static const struct nft_expr_ops nft_reject_bridge_ops = {
	.type		= &nft_reject_bridge_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval		= nft_reject_bridge_eval,
	.init		= nft_reject_bridge_init,
	.dump		= nft_reject_bridge_dump,
	.validate	= nft_reject_bridge_validate,
};

/* Bridge-family "reject" expression type, registered on module init. */
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
	.family		= NFPROTO_BRIDGE,
	.name		= "reject",
	.ops		= &nft_reject_bridge_ops,
	.policy		= nft_reject_policy,
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};
403
/* Register the bridge "reject" expression with the nf_tables core. */
static int __init nft_reject_bridge_module_init(void)
{
	return nft_register_expr(&nft_reject_bridge_type);
}

/* Unregister the expression on module removal. */
static void __exit nft_reject_bridge_module_exit(void)
{
	nft_unregister_expr(&nft_reject_bridge_type);
}

module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);
416
417 MODULE_LICENSE("GPL");
418 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
419 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
This page took 0.038443 seconds and 5 git commands to generate.