Merge tag 'driver-core-4.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git...
[deliverable/linux.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
45
46 #ifdef DEBUG_IP_FIREWALL
47 #define dprintf(format, args...) pr_info(format , ## args)
48 #else
49 #define dprintf(format, args...)
50 #endif
51
52 #ifdef DEBUG_IP_FIREWALL_USER
53 #define duprintf(format, args...) pr_info(format , ## args)
54 #else
55 #define duprintf(format, args...)
56 #endif
57
58 #ifdef CONFIG_NETFILTER_DEBUG
59 #define IP_NF_ASSERT(x) WARN_ON(!(x))
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
/* Allocate and populate the initial (boot-time) table layout for an
 * ip6tables table.  The xt_alloc_initial_table() macro token-pastes the
 * "ip6t"/"IP6T" prefixes onto the generic repldata template from
 * xt_repldata.h; the caller owns (and must free) the returned blob.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
75
76 /*
77 We keep a set of rules for each CPU, so we can avoid write-locking
78 them in the softirq when updating the counters and therefore
79 only need to read-lock in the softirq; doing a write_lock_bh() in user
80 context stops packets coming through and allows user context to read
81 the counters or update the rules.
82
83 Hence the start of any table is given by get_table() below. */
84
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
/*
 * Match a packet against the IPv6-specific part of one rule
 * (addresses, in/out interface names, protocol).
 *
 * @skb:     packet being evaluated
 * @indev:   name of the input device ("" when none)
 * @outdev:  name of the output device ("" when none)
 * @ip6info: the rule's ip6t_ip6 match data
 * @protoff: out: offset of the transport header found by ipv6_find_hdr()
 * @fragoff: out: fragment offset (0 for non-fragments / first fragment)
 * @hotdrop: out: set true to make the caller drop the packet immediately
 *
 * Returns true when the packet matches the rule.
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XORs the raw comparison result with the rule's inversion flag, so a
 * single test handles both "match X" and "match ! X". */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address, each masked by the rule's netmask. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface name, wildcard-masked (e.g. "eth+"). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
		return false;
	}

	/* Output interface name, same wildcard rules. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if (ip6info->flags & IP6T_F_PROTO) {
		int protohdr;
		unsigned short _frag_off;

		/* Walks the extension-header chain; fills *protoff and
		 * the fragment offset as a side effect. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Unparseable non-fragment: drop hard.  For
			 * non-first fragments we just don't match. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if (ip6info->invflags & IP6T_INV_PROTO)
				return false;

			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
167
168 /* should be ip6 safe */
169 static bool
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
171 {
172 if (ipv6->flags & ~IP6T_F_MASK) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6->flags & ~IP6T_F_MASK);
175 return false;
176 }
177 if (ipv6->invflags & ~IP6T_INV_MASK) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6->invflags & ~IP6T_INV_MASK);
180 return false;
181 }
182 return true;
183 }
184
185 static unsigned int
186 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
187 {
188 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
189
190 return NF_DROP;
191 }
192
/* Translate a byte offset within a rule blob into an entry pointer.
 * No bounds checking here; offsets are validated at table-load time.
 */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const void *addr = base + offset;

	return (struct ip6t_entry *)addr;
}
198
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_entry *e)
202 {
203 static const struct ip6t_ip6 uncond;
204
205 return e->target_offset == sizeof(struct ip6t_entry) &&
206 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
207 }
208
/* const-correct wrapper around ip6t_get_target(): returns the target
 * record embedded in @e without the caller needing a non-const pointer.
 * The cast away from const is safe because ip6t_get_target() only reads.
 */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
214
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Human-readable names for the netfilter hook points, indexed by hook. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Index into comments[] below: what kind of rule terminated the trace. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters used for every TRACE message. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = LOGLEVEL_WARNING,
			.logflags = NF_LOG_MASK,
		},
	},
};
246
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): inspect rule @s while scanning towards
 * the matched rule @e.  ERROR targets mark user-chain heads and reset the
 * rule counter; once @s == @e the walk stops (return 1) and, for an
 * unconditional STANDARD target with a negative verdict (chain tail), the
 * comment is set to "policy" (base chain) or "return" (user chain).
 * Returns 0 to continue walking.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
277
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that matched rule @e with skb->nf_trace set.  Scans the hook's rule
 * range from its entry point to locate the chain name and rule number.
 */
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	/* First rule reachable from this hook. */
	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
307
/* Advance to the entry physically following @entry in the rule blob
 * (entries are variable-sized; next_offset is the total size of this one).
 */
static inline struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
313
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main packet-evaluation loop: run @skb through @table at the hook given
 * in @state.  Called in softirq context; uses per-CPU jumpstacks and the
 * xt_recseq sequence counter so counters can be read consistently from
 * user context without write-locking here.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      const struct nf_hook_state *state,
	      struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	/* Start at this hook's entry point within the blob. */
	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* Run every extended match; any miss skips the rule. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		/* Rule matched: bump its per-CPU byte/packet counters. */
		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict, encoded as -(v+1). */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--stackidx]);
				continue;
			}
			/* Jump (not GOTO, not plain fallthrough): remember
			 * where to return to. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Extended target: let its handler decide the verdict. */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
457
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
/*
 * Depth-first walk over the rule graph for each valid hook.  No explicit
 * recursion: the per-entry counters.pcnt field temporarily stores the
 * back-pointer to the jump source (restored to 0 on the way out), and
 * bit NF_INET_NUMHOOKS of comefrom marks "currently on the walk stack"
 * so revisiting such an entry means a loop.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Already on the walk stack => cycle. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Reject verdicts below the valid range. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* Follow and clear the saved back
					 * pointer. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
568
569 static void cleanup_match(struct xt_entry_match *m, struct net *net)
570 {
571 struct xt_mtdtor_param par;
572
573 par.net = net;
574 par.match = m->u.kernel.match;
575 par.matchinfo = m->data;
576 par.family = NFPROTO_IPV6;
577 if (par.match->destroy != NULL)
578 par.match->destroy(&par);
579 module_put(par.match->me);
580 }
581
582 static int
583 check_entry(const struct ip6t_entry *e)
584 {
585 const struct xt_entry_target *t;
586
587 if (!ip6_checkentry(&e->ipv6))
588 return -EINVAL;
589
590 if (e->target_offset + sizeof(struct xt_entry_target) >
591 e->next_offset)
592 return -EINVAL;
593
594 t = ip6t_get_target_c(e);
595 if (e->target_offset + t->u.target_size > e->next_offset)
596 return -EINVAL;
597
598 return 0;
599 }
600
601 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
602 {
603 const struct ip6t_ip6 *ipv6 = par->entryinfo;
604 int ret;
605
606 par->match = m->u.kernel.match;
607 par->matchinfo = m->data;
608
609 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
610 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
611 if (ret < 0) {
612 duprintf("ip_tables: check failed for `%s'.\n",
613 par.match->name);
614 return ret;
615 }
616 return 0;
617 }
618
/* Resolve the match named in @m (loading its module if needed), bind it,
 * and validate it via check_match().  On validation failure the module
 * reference taken by xt_request_find_match() is released.
 * Returns 0 or a negative errno.
 */
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
642
643 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
644 {
645 struct xt_entry_target *t = ip6t_get_target(e);
646 struct xt_tgchk_param par = {
647 .net = net,
648 .table = name,
649 .entryinfo = e,
650 .target = t->u.kernel.target,
651 .targinfo = t->data,
652 .hook_mask = e->comefrom,
653 .family = NFPROTO_IPV6,
654 };
655 int ret;
656
657 t = ip6t_get_target(e);
658 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
659 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
660 if (ret < 0) {
661 duprintf("ip_tables: check failed for `%s'.\n",
662 t->u.kernel.target->name);
663 return ret;
664 }
665 return 0;
666 }
667
/* Fully instantiate one entry: allocate its per-CPU counter, resolve and
 * validate every match, then resolve and validate the target.  On any
 * failure, everything set up so far is torn down (goto-cleanup pattern).
 * Returns 0 or a negative errno.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

	/* j counts successfully checked matches so cleanup can stop there. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}
723
724 static bool check_underflow(const struct ip6t_entry *e)
725 {
726 const struct xt_entry_target *t;
727 unsigned int verdict;
728
729 if (!unconditional(e))
730 return false;
731 t = ip6t_get_target_c(e);
732 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
733 return false;
734 verdict = ((struct xt_standard_target *)t)->verdict;
735 verdict = -verdict - 1;
736 return verdict == NF_DROP || verdict == NF_ACCEPT;
737 }
738
/* First-pass validation of one entry during table translation: alignment,
 * bounds within [base, limit), minimum size, and per-entry sanity.  Also
 * records hook entry points and (validated) underflows whose offsets the
 * user declared, and clears the kernel-owned counters/comefrom fields.
 * Returns 0 or -EINVAL.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Entry header must be aligned and lie (with its full
	 * next_offset span) inside the blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
791
/* Inverse of find_check_entry(): destroy all matches, then the target
 * (destructor + module reference), then free the per-CPU counter.
 */
static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ip6t_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);

	xt_percpu_counter_free(e->counters.pcnt);
}
813
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Multi-pass validation of a replacement table:
 *  1) size/offset/hook checks for every entry (and stacksize from the
 *     number of user chains, i.e. ERROR targets);
 *  2) entry count matches what the user declared;
 *  3) every valid hook got an entry point and an underflow;
 *  4) loop detection via mark_source_chains();
 *  5) full match/target instantiation, with rollback of the entries
 *     already instantiated if one fails.
 * Returns 0, or a negative errno (-ELOOP for rule loops).
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back only the i entries that were instantiated. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
}
896
/* Sum the per-CPU byte/packet counters of every entry in @t into the
 * caller-provided @counters array (indexed by rule position).  Each
 * per-CPU read is retried under that CPU's xt_recseq seqcount so a
 * concurrent packet-path update can't yield a torn bcnt/pcnt pair.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
926
/* Allocate a zeroed snapshot array (one xt_counters per rule) and fill it
 * from the table's live per-CPU counters.  Caller must vfree() the result.
 * Returns the array or ERR_PTR(-ENOMEM).
 */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
946
/* Copy the whole rule blob to userspace, then patch it up in place:
 * overwrite each entry's counters with a consistent snapshot, and replace
 * kernel match/target pointers' name fields with the user-visible names.
 * Returns 0 or -EFAULT.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Patch in the snapshot counters for this rule. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Rewrite each match's name field with the kernel name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1016
1017 #ifdef CONFIG_COMPAT
1018 static void compat_standard_from_user(void *dst, const void *src)
1019 {
1020 int v = *(compat_int_t *)src;
1021
1022 if (v > 0)
1023 v += xt_compat_calc_jump(AF_INET6, v);
1024 memcpy(dst, &v, sizeof(v));
1025 }
1026
1027 static int compat_standard_to_user(void __user *dst, const void *src)
1028 {
1029 compat_int_t cv = *(int *)src;
1030
1031 if (cv > 0)
1032 cv -= xt_compat_calc_jump(AF_INET6, cv);
1033 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1034 }
1035
/* Compute how much smaller one entry becomes in the 32-bit (compat)
 * layout, record that delta with xt_compat_add_offset(), shrink
 * newinfo->size accordingly, and shift every hook_entry/underflow that
 * lies after this entry.  Returns 0 or a negative errno.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Header shrinkage plus per-match and target compat deltas. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1066
/* Build the compat view of a table's metadata: copy the header fields of
 * @info and rewrite size/hook offsets for the 32-bit layout by running
 * compat_calc_entry() over every rule.  Caller must hold the compat lock
 * (the offsets registered here are flushed by the caller).
 * Returns 0 or a negative errno.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1089 #endif
1090
/* IP6T_SO_GET_INFO handler: look up (auto-loading the ip6table_<name>
 * module if needed) the named table and copy its hook offsets, entry
 * count and size to userspace.  When @compat is set, sizes/offsets are
 * converted to the 32-bit layout under the compat lock first.
 * Returns 0 or a negative errno.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Ensure NUL termination of the user-supplied table name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1151
/* IP6T_SO_GET_ENTRIES handler: validate the user's declared buffer size
 * against the live table, then dump all entries (with counters and
 * user-visible names) into uptr->entrytable.  -EAGAIN is returned when
 * the user's size no longer matches the table (it changed since
 * get_info()).  Returns 0 or a negative errno.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}
	/* Ensure NUL termination of the user-supplied table name. */
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1192
/* Swap the live ruleset of table @name for @newinfo and return the old
 * counters to userspace via @counters_ptr.  On success the old entries
 * are cleaned up and freed here; on failure @newinfo is left untouched
 * for the caller to dispose of.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

put_module:
	module_put(t->me);
	xt_table_unlock(t);
free_newinfo_counters_untrans:
	vfree(counters);
out:
	return ret;
}
1265
/* IP6T_SO_SET_REPLACE handler (native ABI): copy the replacement header
 * and rule blob from userspace, validate/translate it, then atomically
 * swap it in via __do_replace().
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Userspace may not have NUL-terminated the table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1316
1317 static int
1318 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1319 int compat)
1320 {
1321 unsigned int i;
1322 struct xt_counters_info tmp;
1323 struct xt_counters *paddc;
1324 unsigned int num_counters;
1325 char *name;
1326 int size;
1327 void *ptmp;
1328 struct xt_table *t;
1329 const struct xt_table_info *private;
1330 int ret = 0;
1331 struct ip6t_entry *iter;
1332 unsigned int addend;
1333 #ifdef CONFIG_COMPAT
1334 struct compat_xt_counters_info compat_tmp;
1335
1336 if (compat) {
1337 ptmp = &compat_tmp;
1338 size = sizeof(struct compat_xt_counters_info);
1339 } else
1340 #endif
1341 {
1342 ptmp = &tmp;
1343 size = sizeof(struct xt_counters_info);
1344 }
1345
1346 if (copy_from_user(ptmp, user, size) != 0)
1347 return -EFAULT;
1348
1349 #ifdef CONFIG_COMPAT
1350 if (compat) {
1351 num_counters = compat_tmp.num_counters;
1352 name = compat_tmp.name;
1353 } else
1354 #endif
1355 {
1356 num_counters = tmp.num_counters;
1357 name = tmp.name;
1358 }
1359
1360 if (len != size + num_counters * sizeof(struct xt_counters))
1361 return -EINVAL;
1362
1363 paddc = vmalloc(len - size);
1364 if (!paddc)
1365 return -ENOMEM;
1366
1367 if (copy_from_user(paddc, user + size, len - size) != 0) {
1368 ret = -EFAULT;
1369 goto free;
1370 }
1371
1372 t = xt_find_table_lock(net, AF_INET6, name);
1373 if (IS_ERR_OR_NULL(t)) {
1374 ret = t ? PTR_ERR(t) : -ENOENT;
1375 goto free;
1376 }
1377
1378 local_bh_disable();
1379 private = t->private;
1380 if (private->number != num_counters) {
1381 ret = -EINVAL;
1382 goto unlock_up_free;
1383 }
1384
1385 i = 0;
1386 addend = xt_write_recseq_begin();
1387 xt_entry_foreach(iter, private->entries, private->size) {
1388 struct xt_counters *tmp;
1389
1390 tmp = xt_get_this_cpu_counter(&iter->counters);
1391 ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1392 ++i;
1393 }
1394 xt_write_recseq_end(addend);
1395 unlock_up_free:
1396 local_bh_enable();
1397 xt_table_unlock(t);
1398 module_put(t->me);
1399 free:
1400 vfree(paddc);
1401
1402 return ret;
1403 }
1404
#ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace; the counters pointer
 * becomes compat_uptr_t, so sizes and field offsets differ from the
 * native structure. */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1417
/* Convert one native rule to the compat layout and copy it (plus its
 * counters) to *dstptr, advancing *dstptr and shrinking *size by the
 * native-vs-compat delta.  Offsets within the entry are rebased to the
 * compat blob.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* Rebase offsets by how much the entry shrank so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1456
/* Look up (and pin) the match module named in @m and add its compat
 * size delta to *size.  @name and @ipv6 are currently unused; they are
 * kept for symmetry with the native check path's calling convention.
 */
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
1476
/* Drop the module references taken on a compat entry's matches and
 * target during checking (no ->destroy callbacks are invoked). */
static void compat_release_entry(struct compat_ip6t_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
}
1488
/* First pass over a compat entry: bounds/alignment checks, resolve and
 * pin its matches and target, record the native-vs-compat size delta
 * with xt_compat_add_offset(), and note hook entry/underflow positions.
 * On error all module references taken so far are dropped.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e);
	if (ret)
		return ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* counts matches successfully pinned, for unwind */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1576
/* Second pass: expand one compat entry into the native layout at
 * *dstptr, growing *size by the compat-vs-native delta and rebasing
 * target/next offsets plus the already-recorded hook positions.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Entries grow as they are expanded; shift any hook/underflow
	 * offsets that point past this entry accordingly. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1615
/* Run the ->checkentry hooks of a translated (now native-layout) entry's
 * matches and target, after allocating its per-cpu counter.  On failure
 * every match already checked is cleaned up and the counter freed.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;
	j = 0;	/* counts matches successfully checked, for unwind */
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}
1656
/* Translate a complete 32-bit ruleset blob into the native layout.
 * Pass 1 (under xt_compat_lock): validate each compat entry, pin its
 * extensions and record size deltas.  Pass 2: copy/expand everything
 * into a freshly allocated native table, then verify chain reachability
 * and run the ->checkentry hooks.  On success *pinfo/*pentry0 are
 * replaced with the native table and the old info is freed.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* counts entries whose extensions are pinned, for unwind */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;	/* counts entries whose ->check ran, for unwind */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1809
/* IP6T_SO_SET_REPLACE handler for 32-bit userspace: like do_replace()
 * but the blob is first translated from the compat layout.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* Userspace may not have NUL-terminated the table name. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1865
1866 static int
1867 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1868 unsigned int len)
1869 {
1870 int ret;
1871
1872 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1873 return -EPERM;
1874
1875 switch (cmd) {
1876 case IP6T_SO_SET_REPLACE:
1877 ret = compat_do_replace(sock_net(sk), user, len);
1878 break;
1879
1880 case IP6T_SO_SET_ADD_COUNTERS:
1881 ret = do_add_counters(sock_net(sk), user, len, 1);
1882 break;
1883
1884 default:
1885 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1886 ret = -EINVAL;
1887 }
1888
1889 return ret;
1890 }
1891
/* 32-bit userspace layout of struct ip6t_get_entries. */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1897
/* Copy the whole table, rule by rule, to 32-bit userspace, snapshotting
 * the counters first via alloc_counters(). */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1926
/* IP6T_SO_GET_ENTRIES handler for 32-bit userspace: validate the request
 * against the table's compat size and copy the translated blob out.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}
	/* Userspace may not have NUL-terminated the table name. */
	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Stale size from a replaced table: ask for retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1974
1975 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1976
1977 static int
1978 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1979 {
1980 int ret;
1981
1982 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1983 return -EPERM;
1984
1985 switch (cmd) {
1986 case IP6T_SO_GET_INFO:
1987 ret = get_info(sock_net(sk), user, len, 1);
1988 break;
1989 case IP6T_SO_GET_ENTRIES:
1990 ret = compat_get_entries(sock_net(sk), user, len);
1991 break;
1992 default:
1993 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1994 }
1995 return ret;
1996 }
1997 #endif
1998
/* Native setsockopt entry point: dispatch privileged set requests. */
static int
do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IP6T_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2023
/* Native getsockopt entry point: table info/entries queries plus
 * match/target revision probing. */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Userspace may not have NUL-terminated the name. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2075
/* Detach a table from xtables and free it: clean up each rule (dropping
 * extension module refs), return the rule-count module reference if one
 * was taken, then release the table info. */
static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2093
/* Register a table with its initial ruleset @repl and hook @ops.
 * *res is published before the hooks are registered so packets seen
 * immediately afterwards find the table; it is reset to NULL on failure.
 * Returns 0 or a negative errno.
 */
int ip6t_register_table(struct net *net, const struct xt_table *table,
			const struct ip6t_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ip6t_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
2137
/* Public teardown: unhook first so no packets reach the table, then
 * free it. */
void ip6t_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ip6t_unregister_table(net, table);
}
2144
/* Returns true when (type, code) matches the configured type and the
 * inclusive [min_code, max_code] range, with the result optionally
 * inverted. */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code && code <= max_code;

	return hit ^ invert;
}
2154
/* Match callback for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the rule's configured range. */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2182
2183 /* Called when user tries to insert an entry of this type. */
2184 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2185 {
2186 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2187
2188 /* Must specify no unknown invflags */
2189 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2190 }
2191
/* The built-in targets: standard (NULL) and error.  The standard target
 * has no ->target function; verdicts are handled inline by the traverser. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2211
/* Sockopt registration: routes IP6T_SO_* get/set requests (and their
 * compat variants) to the handlers above. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2228
/* The built-in "icmp6" match. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2239
/* Per-netns setup: register the IPv6 xtables state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2244
/* Per-netns teardown: release the IPv6 xtables state for @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2249
/* Per-network-namespace lifecycle hooks. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2254
/* Module init: pernet state, built-in targets/matches, then the sockopt
 * interface; unwound in reverse on any failure.  (The err2/err4/err5
 * label numbering is historical and intentionally non-sequential.) */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2288
/* Module exit: tear down in exact reverse order of ip6_tables_init(). */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2297
/* Public API used by the ip6table_* table modules. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);
This page took 0.077456 seconds and 5 git commands to generate.