/* net/ipv6/netfilter/ip6_tables.c */
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
45
46 #ifdef DEBUG_IP_FIREWALL
47 #define dprintf(format, args...) pr_info(format , ## args)
48 #else
49 #define dprintf(format, args...)
50 #endif
51
52 #ifdef DEBUG_IP_FIREWALL_USER
53 #define duprintf(format, args...) pr_info(format , ## args)
54 #else
55 #define duprintf(format, args...)
56 #endif
57
58 #ifdef CONFIG_NETFILTER_DEBUG
59 #define IP_NF_ASSERT(x) WARN_ON(!(x))
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
/* Allocate and populate the built-in (initial) ruleset for table @info.
 * xt_alloc_initial_table() is a macro from xt_repldata.h; the ip6t/IP6T
 * arguments select the IPv6 replace-data layout.  Caller owns the
 * returned blob (freed by the table registration code).
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
75
76 /*
77 We keep a set of rules for each CPU, so we can avoid write-locking
78 them in the softirq when updating the counters and therefore
79 only need to read-lock in the softirq; doing a write_lock_bh() in user
80 context stops packets coming through and allows user context to read
81 the counters or update the rules.
82
83 Hence the start of any table is given by get_table() below. */
84
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
/*
 * @skb:     packet under test
 * @indev:   input interface name ("" when none)
 * @outdev:  output interface name ("" when none)
 * @ip6info: the ip6t_ip6 portion of the rule
 * @protoff: out - transport header offset, filled in when the rule
 *           requests protocol matching (IP6T_F_PROTO)
 * @fragoff: out - fragment offset returned by ipv6_find_hdr()
 * @hotdrop: out - set true to drop the packet immediately
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison with the rule's invert flag */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Interface names: nonzero ret means mismatch under the mask */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if (ip6info->flags & IP6T_F_PROTO) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header walk failed on a non-fragment: packet is
			 * malformed, drop it rather than silently no-match */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if (ip6info->invflags & IP6T_INV_PROTO)
				return false;

			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
167
168 /* should be ip6 safe */
169 static bool
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
171 {
172 if (ipv6->flags & ~IP6T_F_MASK) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6->flags & ~IP6T_F_MASK);
175 return false;
176 }
177 if (ipv6->invflags & ~IP6T_INV_MASK) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6->invflags & ~IP6T_INV_MASK);
180 return false;
181 }
182 return true;
183 }
184
/* Target of the implicit ERROR rule: ratelimited log, then drop.
 * Reached only if a packet ever hits an ERROR entry, which indicates a
 * broken ruleset.
 */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
192
/* Convert a byte offset from the start of a rule blob into an entry
 * pointer.  Offsets must already have been validated by translation.
 */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)((const char *)base + offset);
}
198
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	/* Zero-filled template; the whole struct (including any padding)
	 * is compared, so this assumes userspace zeroed the entire
	 * ip6t_ip6 — NOTE(review): confirm padding is always cleared.
	 */
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
207
/* const-preserving wrapper around ip6t_get_target(), which only takes a
 * non-const entry pointer.  The cast is safe: the target is not written.
 */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
213
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Hook number -> chain name used in TRACE log lines */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

/* Which kind of entry terminated the trace walk */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters for every TRACE line */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = LOGLEVEL_WARNING,
			.logflags = NF_LOG_MASK,
		},
	},
};
245
/* Mildly perf critical (only if packet tracing is on) */
/* Walk helper for trace_packet(): called for each entry @s from the
 * hook's start until entry @e (the one that matched) is reached.
 * Tracks the current chain name and rule number as it goes.
 * Returns 1 (stop iterating) when @s == @e, else 0.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
277
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet (skb->nf_trace set) that matched entry @e on hook @hook.
 * Walks the hook's entries to recover the chain name and rule number.
 */
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
306 #endif
307
308 static inline struct ip6t_entry *
309 ip6t_next_entry(const struct ip6t_entry *entry)
310 {
311 return (void *)entry + entry->next_offset;
312 }
313
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-processing loop: evaluate @skb against @table on the hook
 * in @state.  Runs with BH disabled inside an xt_write_recseq section so
 * counter readers can detect concurrent updates.  Uses a per-cpu
 * jumpstack to implement chain calls/returns without recursion.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      const struct nf_hook_state *state,
	      struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* All extension matches must succeed for the rule to fire */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? (NULL target function encodes verdict/jump) */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--stackidx]);
				continue;
			}
			/* A real jump (not a fallthrough and not -g/GOTO):
			 * push the call site so RETURN can come back */
			if (table_base + v != ip6t_next_entry(e) &&
		 	    !(e->ipv6.flags & IP6T_F_GOTO)) {
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
457
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/* Loop detection over the user-supplied ruleset.  Abuses two fields as
 * scratch space during the walk: counters.pcnt holds the back-pointer
 * of the current chain-call path, and comefrom accumulates the hook
 * bitmask (bit NF_INET_NUMHOOKS marks "currently on the path").  Both
 * are reset/overwritten afterwards by translate_table()'s later steps.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Reaching an entry already on the current path
			 * means the ruleset contains a cycle. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* NOTE(review): int vs size_t
					 * comparison here relies on
					 * newinfo->size >= sizeof(entry);
					 * confirm earlier checks guarantee
					 * that for crafted blobs. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
569
570 static void cleanup_match(struct xt_entry_match *m, struct net *net)
571 {
572 struct xt_mtdtor_param par;
573
574 par.net = net;
575 par.match = m->u.kernel.match;
576 par.matchinfo = m->data;
577 par.family = NFPROTO_IPV6;
578 if (par.match->destroy != NULL)
579 par.match->destroy(&par);
580 module_put(par.match->me);
581 }
582
/* Structural sanity checks for one user-supplied entry: valid flag bits
 * and a target record that fits entirely between target_offset and
 * next_offset.  Must pass before any extension lookup touches the data.
 */
static int
check_entry(const struct ip6t_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* room for at least the fixed target header? */
	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	/* the target's self-declared size must also fit */
	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
603
604 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
605 {
606 const struct ip6t_ip6 *ipv6 = par->entryinfo;
607 int ret;
608
609 par->match = m->u.kernel.match;
610 par->matchinfo = m->data;
611
612 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
613 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
614 if (ret < 0) {
615 duprintf("ip_tables: check failed for `%s'.\n",
616 par.match->name);
617 return ret;
618 }
619 return 0;
620 }
621
622 static int
623 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
624 {
625 struct xt_match *match;
626 int ret;
627
628 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
629 m->u.user.revision);
630 if (IS_ERR(match)) {
631 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
632 return PTR_ERR(match);
633 }
634 m->u.kernel.match = match;
635
636 ret = check_match(m, par);
637 if (ret)
638 goto err;
639
640 return 0;
641 err:
642 module_put(m->u.kernel.match->me);
643 return ret;
644 }
645
646 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
647 {
648 struct xt_entry_target *t = ip6t_get_target(e);
649 struct xt_tgchk_param par = {
650 .net = net,
651 .table = name,
652 .entryinfo = e,
653 .target = t->u.kernel.target,
654 .targinfo = t->data,
655 .hook_mask = e->comefrom,
656 .family = NFPROTO_IPV6,
657 };
658 int ret;
659
660 t = ip6t_get_target(e);
661 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
662 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
663 if (ret < 0) {
664 duprintf("ip_tables: check failed for `%s'.\n",
665 t->u.kernel.target->name);
666 return ret;
667 }
668 return 0;
669 }
670
/* Fully resolve and validate one entry: allocate its per-cpu counter,
 * look up + check every match extension, then look up + check the
 * target.  On any failure, everything acquired so far is unwound
 * (matches cleaned up in order, counter freed) before returning.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

	/* j counts successfully checked matches so the cleanup loop
	 * below releases exactly those and no more */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}
730
/* An underflow entry (base-chain policy) must be an unconditional
 * STANDARD-target rule whose verdict is ACCEPT or DROP, so that a
 * packet falling off the end of a base chain always gets a verdict.
 */
static bool check_underflow(const struct ip6t_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ipv6))
		return false;
	t = ip6t_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	/* decode the negative verdict encoding: -v - 1 == NF_* value */
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
745
746 static int
747 check_entry_size_and_hooks(struct ip6t_entry *e,
748 struct xt_table_info *newinfo,
749 const unsigned char *base,
750 const unsigned char *limit,
751 const unsigned int *hook_entries,
752 const unsigned int *underflows,
753 unsigned int valid_hooks)
754 {
755 unsigned int h;
756
757 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
758 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
759 duprintf("Bad offset %p\n", e);
760 return -EINVAL;
761 }
762
763 if (e->next_offset
764 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
765 duprintf("checking: element %p size %u\n",
766 e, e->next_offset);
767 return -EINVAL;
768 }
769
770 /* Check hooks & underflows */
771 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
772 if (!(valid_hooks & (1 << h)))
773 continue;
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h]) {
777 if (!check_underflow(e)) {
778 pr_err("Underflows must be unconditional and "
779 "use the STANDARD target with "
780 "ACCEPT/DROP\n");
781 return -EINVAL;
782 }
783 newinfo->underflow[h] = underflows[h];
784 }
785 }
786
787 /* Clear counters and comefrom */
788 e->counters = ((struct xt_counters) { 0, 0 });
789 e->comefrom = 0;
790 return 0;
791 }
792
/* Undo everything find_check_entry() set up for @e: destroy all match
 * extensions, run the target's destructor, drop module references, and
 * free the per-cpu counter.
 */
static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ip6t_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);

	xt_percpu_counter_free(e->counters.pcnt);
}
814
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Multi-pass validation of a replacement ruleset:
 *  1. walk the blob checking sizes/offsets and recording hooks,
 *  2. verify every valid hook got an entry point and an underflow,
 *  3. run loop detection (mark_source_chains),
 *  4. resolve and check every match/target, unwinding on failure.
 * Returns 0, -EINVAL, -ELOOP, or an extension's error code.
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* every user-defined chain head (ERROR target) can add a
		 * level to the runtime jumpstack */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* unwind only the i entries that passed */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
}
897
/* Sum the per-cpu packet/byte counters of every rule into @counters
 * (one slot per rule, in blob order).  Each 64-bit pair is read under
 * the cpu's xt_recseq seqcount and retried on concurrent update, so a
 * consistent snapshot is obtained without stopping packet processing.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
927
928 static struct xt_counters *alloc_counters(const struct xt_table *table)
929 {
930 unsigned int countersize;
931 struct xt_counters *counters;
932 const struct xt_table_info *private = table->private;
933
934 /* We need atomic snapshot of counters: rest doesn't change
935 (other than comefrom, which userspace doesn't care
936 about). */
937 countersize = sizeof(struct xt_counters) * private->number;
938 counters = vzalloc(countersize);
939
940 if (counters == NULL)
941 return ERR_PTR(-ENOMEM);
942
943 get_counters(private, counters);
944
945 return counters;
946 }
947
/* Copy the whole ruleset blob to userspace, then patch it up in place:
 * fill in the snapshotted counters for each rule and rewrite each
 * match/target's kernel name back into its user-visible name field
 * (the kernel overwrote those unions when binding the extensions).
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* restore the user-visible name of every match */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* ... and of the target */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1017
1018 #ifdef CONFIG_COMPAT
1019 static void compat_standard_from_user(void *dst, const void *src)
1020 {
1021 int v = *(compat_int_t *)src;
1022
1023 if (v > 0)
1024 v += xt_compat_calc_jump(AF_INET6, v);
1025 memcpy(dst, &v, sizeof(v));
1026 }
1027
1028 static int compat_standard_to_user(void __user *dst, const void *src)
1029 {
1030 compat_int_t cv = *(int *)src;
1031
1032 if (cv > 0)
1033 cv -= xt_compat_calc_jump(AF_INET6, cv);
1034 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1035 }
1036
/* Compute how much smaller this entry is in the 32-bit compat layout:
 * the fixed-entry delta plus each match's and the target's compat
 * offset.  Records the per-entry delta with xt_compat_add_offset() and
 * shrinks newinfo's size and any hook/underflow offsets that lie after
 * this entry.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* hook entry points that come after @e move down by @off */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1067
/* Build a compat-adjusted copy of @info in @newinfo: same metadata but
 * with size and hook offsets shrunk to the 32-bit layout.  Also
 * populates the AF_INET6 compat offset table as a side effect (caller
 * is responsible for flushing it).
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1090 #endif
1091
/* IP6T_SO_GET_INFO handler: copy a table's metadata (hook entry points,
 * underflows, entry count, blob size) to userspace.  When @compat is
 * set the sizes/offsets are first translated to the 32-bit layout under
 * the compat lock.  May autoload the "ip6table_<name>" module.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* user data: force NUL termination before using as a name */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1152
/* IP6T_SO_GET_ENTRIES handler: copy the named table's full ruleset to
 * userspace.  The caller must pass the exact current blob size (as
 * reported by get_info); a mismatch returns -EAGAIN so userspace can
 * retry after re-reading the info.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1192
/* Core of table replacement, shared by do_replace() and
 * compat_do_replace(): atomically swap @newinfo in for the named
 * table, harvest the old table's counters into @counters_ptr, and tear
 * down the old ruleset.  On success ownership of @newinfo passes to
 * the xtables core; on failure the caller still owns (and frees) it.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Drop one ref unless both old and new tables hold extra rules;
	 * drop a second when going from "has rules" to "initial only".
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1265
/* IP6T_SO_SET_REPLACE handler (native layout): copy the replacement
 * header and rule blob from userspace, translate and verify the rules,
 * then install the new table via __do_replace().
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Userspace may not have NUL-terminated the table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* Rule blob follows the header in the user buffer. */
	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1316
1317 static int
1318 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1319 int compat)
1320 {
1321 unsigned int i;
1322 struct xt_counters_info tmp;
1323 struct xt_counters *paddc;
1324 unsigned int num_counters;
1325 char *name;
1326 int size;
1327 void *ptmp;
1328 struct xt_table *t;
1329 const struct xt_table_info *private;
1330 int ret = 0;
1331 struct ip6t_entry *iter;
1332 unsigned int addend;
1333 #ifdef CONFIG_COMPAT
1334 struct compat_xt_counters_info compat_tmp;
1335
1336 if (compat) {
1337 ptmp = &compat_tmp;
1338 size = sizeof(struct compat_xt_counters_info);
1339 } else
1340 #endif
1341 {
1342 ptmp = &tmp;
1343 size = sizeof(struct xt_counters_info);
1344 }
1345
1346 if (copy_from_user(ptmp, user, size) != 0)
1347 return -EFAULT;
1348
1349 #ifdef CONFIG_COMPAT
1350 if (compat) {
1351 num_counters = compat_tmp.num_counters;
1352 name = compat_tmp.name;
1353 } else
1354 #endif
1355 {
1356 num_counters = tmp.num_counters;
1357 name = tmp.name;
1358 }
1359
1360 if (len != size + num_counters * sizeof(struct xt_counters))
1361 return -EINVAL;
1362
1363 paddc = vmalloc(len - size);
1364 if (!paddc)
1365 return -ENOMEM;
1366
1367 if (copy_from_user(paddc, user + size, len - size) != 0) {
1368 ret = -EFAULT;
1369 goto free;
1370 }
1371
1372 t = xt_find_table_lock(net, AF_INET6, name);
1373 if (IS_ERR_OR_NULL(t)) {
1374 ret = t ? PTR_ERR(t) : -ENOENT;
1375 goto free;
1376 }
1377
1378 local_bh_disable();
1379 private = t->private;
1380 if (private->number != num_counters) {
1381 ret = -EINVAL;
1382 goto unlock_up_free;
1383 }
1384
1385 i = 0;
1386 addend = xt_write_recseq_begin();
1387 xt_entry_foreach(iter, private->entries, private->size) {
1388 struct xt_counters *tmp;
1389
1390 tmp = xt_get_this_cpu_counter(&iter->counters);
1391 ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1392 ++i;
1393 }
1394 xt_write_recseq_end(addend);
1395 unlock_up_free:
1396 local_bh_enable();
1397 xt_table_unlock(t);
1398 module_put(t->me);
1399 free:
1400 vfree(paddc);
1401
1402 return ret;
1403 }
1404
1405 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: the counters pointer
 * narrows to compat_uptr_t, so field offsets differ from the native
 * structure.
 */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;		/* bitmask of hooked chains */
	u32 num_entries;		/* number of rules that follow */
	u32 size;			/* byte size of entries[] */
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1417
/* Copy one kernel rule back to userspace in compat layout, shrinking
 * matches/target and patching target_offset/next_offset accordingly.
 * @dstptr/@size track the output cursor and remaining size;
 * counters[i] supplies the rule's accumulated counters.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* Offsets shrink by however much the entry has shrunk so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1456
1457 static int
1458 compat_find_calc_match(struct xt_entry_match *m,
1459 const char *name,
1460 const struct ip6t_ip6 *ipv6,
1461 int *size)
1462 {
1463 struct xt_match *match;
1464
1465 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1466 m->u.user.revision);
1467 if (IS_ERR(match)) {
1468 duprintf("compat_check_calc_match: `%s' not found\n",
1469 m->u.user.name);
1470 return PTR_ERR(match);
1471 }
1472 m->u.kernel.match = match;
1473 *size += xt_compat_match_offset(match);
1474 return 0;
1475 }
1476
1477 static void compat_release_entry(struct compat_ip6t_entry *e)
1478 {
1479 struct xt_entry_target *t;
1480 struct xt_entry_match *ematch;
1481
1482 /* Cleanup all matches */
1483 xt_ematch_foreach(ematch, e)
1484 module_put(ematch->u.kernel.match->me);
1485 t = compat_ip6t_get_target(e);
1486 module_put(t->u.kernel.target->me);
1487 }
1488
/* First-pass validation of one compat rule: bounds/alignment checks,
 * resolve all match and target extensions (taking module refs), record
 * the native-vs-compat size delta for this entry, and note hook entry/
 * underflow positions.  On error all references taken so far are
 * dropped again.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be aligned and leave room before the blob's end. */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* Must at least hold the fixed header plus a minimal target. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much this entry grows in native layout. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* counts matches whose module refs we hold */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember the delta so later offsets can be translated. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	/* Undo refs for the first j matches only. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1575
/* Second-pass conversion of one checked compat rule into native layout
 * at *dstptr, expanding matches/target and fixing target_offset,
 * next_offset and the table's hook/underflow offsets by the growth.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* origsize - *size is negative growth: offsets move right. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Shift hook entry/underflow offsets that lie beyond this rule. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1614
/* Run the ->checkentry hooks of a converted (now native-layout) rule's
 * matches and target, and allocate its per-cpu counter.  On failure,
 * already-checked matches are cleaned up and the counter freed.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;
	j = 0;	/* counts matches that passed check_match() */
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	/* Only the first j matches were successfully checked. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}
1655
/* Convert a 32-bit userspace ruleset into the native layout and verify
 * it.  Two passes: (1) under the compat lock, size-check every compat
 * entry and record per-entry size deltas; (2) copy each entry into a
 * freshly allocated native-sized table, then run loop detection and
 * the extension checkentry hooks.  On success *pinfo/*pentry0 are
 * replaced with the native table and the compat one is freed; on
 * failure all module references and allocations are unwound.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* entries holding module refs, for error unwind */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	/* size now includes the native-layout growth from pass one. */
	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = total_size;
	/* Pass two: expand each compat entry into the native table. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;	/* entries that passed compat_check_entry() */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
 out:
	/* Release refs taken for the first j compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
 out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1808
/* IP6T_SO_SET_REPLACE handler for 32-bit callers: like do_replace()
 * but the header and rule blob arrive in compat layout and are run
 * through translate_compat_table() first.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Userspace may not have NUL-terminated the table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1864
1865 static int
1866 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1867 unsigned int len)
1868 {
1869 int ret;
1870
1871 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1872 return -EPERM;
1873
1874 switch (cmd) {
1875 case IP6T_SO_SET_REPLACE:
1876 ret = compat_do_replace(sock_net(sk), user, len);
1877 break;
1878
1879 case IP6T_SO_SET_ADD_COUNTERS:
1880 ret = do_add_counters(sock_net(sk), user, len, 1);
1881 break;
1882
1883 default:
1884 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1885 ret = -EINVAL;
1886 }
1887
1888 return ret;
1889 }
1890
/* 32-bit userspace layout of struct ip6t_get_entries. */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;		/* byte size of entrytable[] */
	struct compat_ip6t_entry entrytable[0];
};
1896
/* Copy every rule of @table to @userptr in compat layout, pairing each
 * with its counters.  Helper for compat_get_entries().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1925
/* IP6T_SO_GET_ENTRIES handler for 32-bit callers: validate the request
 * and copy the ruleset back in compat layout.  The compat lock is held
 * across the size computation and the copy so the recorded offsets
 * stay consistent.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size changed since GET_INFO; caller must retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1972
1973 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1974
1975 static int
1976 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1977 {
1978 int ret;
1979
1980 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1981 return -EPERM;
1982
1983 switch (cmd) {
1984 case IP6T_SO_GET_INFO:
1985 ret = get_info(sock_net(sk), user, len, 1);
1986 break;
1987 case IP6T_SO_GET_ENTRIES:
1988 ret = compat_get_entries(sock_net(sk), user, len);
1989 break;
1990 default:
1991 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1992 }
1993 return ret;
1994 }
1995 #endif
1996
1997 static int
1998 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1999 {
2000 int ret;
2001
2002 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2003 return -EPERM;
2004
2005 switch (cmd) {
2006 case IP6T_SO_SET_REPLACE:
2007 ret = do_replace(sock_net(sk), user, len);
2008 break;
2009
2010 case IP6T_SO_SET_ADD_COUNTERS:
2011 ret = do_add_counters(sock_net(sk), user, len, 0);
2012 break;
2013
2014 default:
2015 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2016 ret = -EINVAL;
2017 }
2018
2019 return ret;
2020 }
2021
/* Native getsockopt entry point: table info/entries queries plus
 * match/target revision probing (which may load extension modules).
 * Requires CAP_NET_ADMIN.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Userspace may not have NUL-terminated the name. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() sets ret; retry after modprobe. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2073
/* Detach @table from the xtables core and free its ruleset.  Callers
 * must already have removed the netfilter hooks.  Drops the extra
 * module reference held while user-added rules existed.
 */
static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2091
/* Register an ip6tables table: translate the initial ruleset in @repl,
 * hand it to the xtables core, publish the table through *res, then
 * attach the netfilter hooks in @ops.  On hook-registration failure
 * the table is torn down again and *res cleared.  Returns 0 or a
 * negative errno.
 */
int ip6t_register_table(struct net *net, const struct xt_table *table,
			const struct ip6t_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ip6t_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
2135
/* Public unregister: detach the netfilter hooks first so no packets
 * are in flight, then free the table and its rules.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ip6t_unregister_table(net, table);
}
2142
/* Returns true when @type equals @test_type and @code lies in
 * [@min_code, @max_code], XOR'ed with @invert; 0/false otherwise.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range;

	in_range = type == test_type &&
		   code >= min_code && code <= max_code;
	return in_range ^ invert;
}
2152
/* Match function for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the rule's configured range.  Non-first
 * fragments never match; a packet too short to carry an ICMPv6 header
 * is hot-dropped.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2180
2181 /* Called when user tries to insert an entry of this type. */
2182 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2183 {
2184 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2185
2186 /* Must specify no unknown invflags */
2187 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2188 }
2189
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Verdict target: targetsize holds the verdict int. */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Error target: logs and drops; carries an error name. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2209
/* get/setsockopt registration for the IP6T_SO_* option range. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2226
/* The built-in "icmp6" match. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2237
/* Per-netns init: set up the IPv6 xtables state (procfs entries). */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2242
/* Per-netns teardown: counterpart of ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2247
/* Hook per-netns init/exit into the network-namespace lifecycle. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2252
2253 static int __init ip6_tables_init(void)
2254 {
2255 int ret;
2256
2257 ret = register_pernet_subsys(&ip6_tables_net_ops);
2258 if (ret < 0)
2259 goto err1;
2260
2261 /* No one else will be downing sem now, so we won't sleep */
2262 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2263 if (ret < 0)
2264 goto err2;
2265 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2266 if (ret < 0)
2267 goto err4;
2268
2269 /* Register setsockopt */
2270 ret = nf_register_sockopt(&ip6t_sockopts);
2271 if (ret < 0)
2272 goto err5;
2273
2274 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2275 return 0;
2276
2277 err5:
2278 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2279 err4:
2280 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2281 err2:
2282 unregister_pernet_subsys(&ip6_tables_net_ops);
2283 err1:
2284 return ret;
2285 }
2286
/* Module exit: unregister in reverse order of ip6_tables_init(). */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2295
/* Exported for the per-table modules (ip6table_filter etc.). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);
This page took 0.082053 seconds and 5 git commands to generate.