/*
 * net/ipv6/netfilter/ip6_tables.c
 * netfilter: xtables: move extension arguments into compound structure (5/6)
 */
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/capability.h>
13#include <linux/in.h>
14#include <linux/skbuff.h>
15#include <linux/kmod.h>
16#include <linux/vmalloc.h>
17#include <linux/netdevice.h>
18#include <linux/module.h>
19#include <linux/poison.h>
20#include <linux/icmpv6.h>
21#include <net/ipv6.h>
22#include <net/compat.h>
23#include <asm/uaccess.h>
24#include <linux/mutex.h>
25#include <linux/proc_fs.h>
26#include <linux/err.h>
27#include <linux/cpumask.h>
28
29#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <linux/netfilter/x_tables.h>
31#include <net/netfilter/nf_log.h>
32
33MODULE_LICENSE("GPL");
34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35MODULE_DESCRIPTION("IPv6 packet filter");
36
37/*#define DEBUG_IP_FIREWALL*/
38/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39/*#define DEBUG_IP_FIREWALL_USER*/
40
41#ifdef DEBUG_IP_FIREWALL
42#define dprintf(format, args...) printk(format , ## args)
43#else
44#define dprintf(format, args...)
45#endif
46
47#ifdef DEBUG_IP_FIREWALL_USER
48#define duprintf(format, args...) printk(format , ## args)
49#else
50#define duprintf(format, args...)
51#endif
52
53#ifdef CONFIG_NETFILTER_DEBUG
54#define IP_NF_ASSERT(x) \
55do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
59} while(0)
60#else
61#define IP_NF_ASSERT(x)
62#endif
63
64#if 0
65/* All the better to debug you with... */
66#define static
67#define inline
68#endif
69
70/*
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
76
77 Hence the start of any table is given by get_table() below. */
78
79/* Check for an extension */
80int
81ip6t_ext_hdr(u8 nexthdr)
82{
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
90}
91
92/* Returns whether matches rule or not. */
93/* Performance critical - called for every packet */
/*
 * Decide whether @skb matches the IPv6-header portion of one rule
 * (@ip6info): addresses, in/out interface names, and protocol.
 * On a protocol match, *protoff is left at the transport header offset
 * found by ipv6_find_hdr() and *fragoff at the fragment offset, for use
 * by the per-rule extension matches that run next.  Sets *hotdrop on a
 * malformed header chain so the caller drops the packet.
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	size_t i;
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR a test result with the rule's corresponding inversion flag. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source and destination addresses, masked before comparison. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP)
	    || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
					  &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Look for ifname matches; this should unroll nicely.
	 * Interface names are compared word-at-a-time under a mask, so
	 * wildcard suffixes ("eth+") work via iniface_mask. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ip6info->iniface)[i])
			& ((const unsigned long *)ip6info->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Same word-wise masked comparison for the output interface. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ip6info->outiface)[i])
			& ((const unsigned long *)ip6info->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Header-chain walk failed.  If we are not in a
			 * non-first fragment (_frag_off == 0) the packet
			 * is malformed: hotdrop it. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
184
185/* should be ip6 safe */
186static bool
187ip6_checkentry(const struct ip6t_ip6 *ipv6)
188{
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
192 return false;
193 }
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
197 return false;
198 }
199 return true;
200}
201
202static unsigned int
203ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
204{
205 if (net_ratelimit())
206 printk("ip6_tables: error: `%s'\n",
207 (const char *)par->targinfo);
208
209 return NF_DROP;
210}
211
212/* Performance critical - called for every packet */
213static inline bool
214do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215 struct xt_match_param *par)
216{
217 par->match = m->u.kernel.match;
218 par->matchinfo = m->data;
219
220 /* Stop iteration if it doesn't match */
221 if (!m->u.kernel.match->match(skb, par))
222 return true;
223 else
224 return false;
225}
226
/* Translate a byte offset inside a table blob into an entry pointer. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ip6t_entry *)((char *)base + offset);
}
232
233/* All zeroes == unconditional rule. */
234/* Mildly perf critical (only if packet tracing is on) */
235static inline int
236unconditional(const struct ip6t_ip6 *ipv6)
237{
238 unsigned int i;
239
240 for (i = 0; i < sizeof(*ipv6); i++)
241 if (((char *)ipv6)[i])
242 break;
243
244 return (i == sizeof(*ipv6));
245}
246
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
/* Hook-number -> chain-name strings used in TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Why a rule is mentioned in a TRACE line: an ordinary rule hit, a
 * chain return, or the chain's policy. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters for TRACE output (level 4, all log flags). */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
/*
 * IP6T_ENTRY_ITERATE callback: walk entries from the hook start and
 * work out in which chain, and at which rule number, entry @e sits.
 * Returns 1 (stop iteration) once @s reaches @e; *chainname, *comment
 * and *rulenum are left describing the hit.
 */
static inline int
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		/* Unconditional standard target with negative verdict is a
		 * chain tail: report "policy" for a base chain, "return"
		 * for a user chain. */
		if (s->target_offset == sizeof(struct ip6t_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IP6T_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

/* Emit one "TRACE: table:chain:comment:rulenum" log line for @skb as it
 * hits entry @e in hook @hook (TRACE target support). */
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
{
	void *table_base;
	const struct ip6t_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	/* Walk this CPU's copy of the table, starting at the hook entry. */
	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];

	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
341
342/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main rule-traversal engine: walk @table's ruleset for @skb at @hook
 * and return a netfilter verdict (NF_ACCEPT, NF_DROP, ...).
 *
 * Jumps to user chains push the return point onto an implicit stack
 * kept in the entries' 'comefrom' fields ('back' is the stack top);
 * IP6T_RETURN pops it.  Runs under the table read lock on this CPU's
 * private copy of the entries.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	/* Fill the shared parameter blocks handed to every match/target. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in = tgpar.in = in;
	mtpar.out = tgpar.out = out;
	tgpar.hooknum = hook;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
			struct ip6t_entry_target *t;

			/* All extension matches must agree too. */
			if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
				goto no_match;

			ADD_COUNTER(e->counters,
				    ntohs(ipv6_hdr(skb)->payload_len) +
				    sizeof(struct ipv6hdr), 1);

			t = ip6t_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ip6t_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IP6T_RETURN) {
						/* Absolute verdict, encoded
						 * as -verdict - 1. */
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				/* A jump (not a mere fallthrough and not
				 * --goto): push the return point. */
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ipv6.flags & IP6T_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ip6t_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
				tgpar.target = t->u.kernel.target;
				tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
				/* Poison comefrom to detect targets that
				 * re-enter the table walker. */
				((struct ip6t_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     &tgpar);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ip6t_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IP6T_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ip6t_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				if (verdict == IP6T_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
#endif
	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
486
487/* Figures out from what hook each rule can be called: returns 0 if
488 there are loops. Puts hook bitmask in comefrom. */
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/*
 * Depth-first walk of the ruleset from each valid hook entry point.
 * comefrom accumulates, per entry, the set of hooks that can reach it;
 * bit NF_INET_NUMHOOKS marks "on the current DFS path" for loop
 * detection.  While descending, counters.pcnt temporarily stores the
 * back-pointer (restored to 0 on the way out) so no extra memory is
 * needed.  Returns 1 on success, 0 on a detected loop or bad verdict.
 */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ip6t_standard_target *t
				= (void *)ip6t_get_target(e);
			int visited = e->comefrom & (1 << hook);

			/* Seeing the path-marker bit again means a cycle. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry)
			     && (strcmp(t->target.u.user.name,
					IP6T_STANDARD_TARGET) == 0)
			     && t->verdict < 0
			     && unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				/* Sanity: negative verdicts are bounded. */
				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* Pop the saved back-pointer and
					 * clear the scratch counter. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* Jump target must lie inside the
					 * blob with room for an entry. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
596
597static int
598cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
599{
600 struct xt_mtdtor_param par;
601
602 if (i && (*i)-- == 0)
603 return 1;
604
605 par.match = m->u.kernel.match;
606 par.matchinfo = m->data;
607 if (par.match->destroy != NULL)
608 par.match->destroy(&par);
609 module_put(par.match->me);
610 return 0;
611}
612
613static int
614check_entry(struct ip6t_entry *e, const char *name)
615{
616 struct ip6t_entry_target *t;
617
618 if (!ip6_checkentry(&e->ipv6)) {
619 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
620 return -EINVAL;
621 }
622
623 if (e->target_offset + sizeof(struct ip6t_entry_target) >
624 e->next_offset)
625 return -EINVAL;
626
627 t = ip6t_get_target(e);
628 if (e->target_offset + t->u.target_size > e->next_offset)
629 return -EINVAL;
630
631 return 0;
632}
633
634static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
635 unsigned int *i)
636{
637 const struct ip6t_ip6 *ipv6 = par->entryinfo;
638 int ret;
639
640 par->match = m->u.kernel.match;
641 par->matchinfo = m->data;
642
643 ret = xt_check_match(par, NFPROTO_IPV6, m->u.match_size - sizeof(*m),
644 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
645 if (ret < 0) {
646 duprintf("ip_tables: check failed for `%s'.\n",
647 par.match->name);
648 return ret;
649 }
650 ++*i;
651 return 0;
652}
653
654static int
655find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
656 unsigned int *i)
657{
658 struct xt_match *match;
659 int ret;
660
661 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
662 m->u.user.revision),
663 "ip6t_%s", m->u.user.name);
664 if (IS_ERR(match) || !match) {
665 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
666 return match ? PTR_ERR(match) : -ENOENT;
667 }
668 m->u.kernel.match = match;
669
670 ret = check_match(m, par, i);
671 if (ret)
672 goto err;
673
674 return 0;
675err:
676 module_put(m->u.kernel.match->me);
677 return ret;
678}
679
680static int check_target(struct ip6t_entry *e, const char *name)
681{
682 struct ip6t_entry_target *t = ip6t_get_target(e);
683 struct xt_tgchk_param par = {
684 .table = name,
685 .entryinfo = e,
686 .target = t->u.kernel.target,
687 .targinfo = t->data,
688 .hook_mask = e->comefrom,
689 };
690 int ret;
691
692 t = ip6t_get_target(e);
693 ret = xt_check_target(&par, NFPROTO_IPV6, t->u.target_size - sizeof(*t),
694 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
695 if (ret < 0) {
696 duprintf("ip_tables: check failed for `%s'.\n",
697 t->u.kernel.target->name);
698 return ret;
699 }
700 return 0;
701}
702
/*
 * Fully validate one entry: structural checks, then resolve and check
 * every match (auto-loading modules), then resolve and check the
 * target.  On failure, every reference taken so far is released —
 * @j counts the matches successfully set up so only those are torn
 * down.  *i counts entries validated so far for the caller.
 */
static int
find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	/* Target checks failed: drop the target module ref, then fall
	 * through to release the matches as well. */
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
749
/*
 * First-pass validation of one entry in a userspace-supplied blob:
 * alignment, bounds against @limit, and minimum size.  Also records
 * which hook entry points / underflows this entry's offset corresponds
 * to, and resets the kernel-owned counters/comefrom fields that
 * userspace has no business setting.  *i counts entries seen.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* An entry must at least hold its own header plus a target. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IP6T_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
792
793static int
794cleanup_entry(struct ip6t_entry *e, unsigned int *i)
795{
796 struct ip6t_entry_target *t;
797
798 if (i && (*i)-- == 0)
799 return 1;
800
801 /* Cleanup all matches */
802 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
803 t = ip6t_get_target(e);
804 if (t->u.kernel.target->destroy)
805 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
806 module_put(t->u.kernel.target->me);
807 return 0;
808}
809
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Full validation pipeline for a new ruleset blob:
 *   1. size/offset/hook checks on every entry,
 *   2. entry count and hook/underflow completeness,
 *   3. loop detection via mark_source_chains(),
 *   4. per-entry match/target resolution and checks,
 *   5. replicate the verified blob to every other CPU's copy.
 * On a step-4 failure the entries already set up are torn down again.
 */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 newinfo,
				 entry0,
				 entry0 + size,
				 hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 find_check_entry, name, size, &i);

	if (ret != 0) {
		/* i holds how many entries were fully checked; undo them. */
		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				   cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
891
892/* Gets counters. */
893static inline int
894add_entry_to_counter(const struct ip6t_entry *e,
895 struct xt_counters total[],
896 unsigned int *i)
897{
898 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
899
900 (*i)++;
901 return 0;
902}
903
904static inline int
905set_entry_to_counter(const struct ip6t_entry *e,
906 struct ip6t_counters total[],
907 unsigned int *i)
908{
909 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
910
911 (*i)++;
912 return 0;
913}
914
915static void
916get_counters(const struct xt_table_info *t,
917 struct xt_counters counters[])
918{
919 unsigned int cpu;
920 unsigned int i;
921 unsigned int curcpu;
922
923 /* Instead of clearing (by a previous call to memset())
924 * the counters and using adds, we set the counters
925 * with data used by 'current' CPU
926 * We dont care about preemption here.
927 */
928 curcpu = raw_smp_processor_id();
929
930 i = 0;
931 IP6T_ENTRY_ITERATE(t->entries[curcpu],
932 t->size,
933 set_entry_to_counter,
934 counters,
935 &i);
936
937 for_each_possible_cpu(cpu) {
938 if (cpu == curcpu)
939 continue;
940 i = 0;
941 IP6T_ENTRY_ITERATE(t->entries[cpu],
942 t->size,
943 add_entry_to_counter,
944 counters,
945 &i);
946 }
947}
948
949static struct xt_counters *alloc_counters(struct xt_table *table)
950{
951 unsigned int countersize;
952 struct xt_counters *counters;
953 const struct xt_table_info *private = table->private;
954
955 /* We need atomic snapshot of counters: rest doesn't change
956 (other than comefrom, which userspace doesn't care
957 about). */
958 countersize = sizeof(struct xt_counters) * private->number;
959 counters = vmalloc_node(countersize, numa_node_id());
960
961 if (counters == NULL)
962 return ERR_PTR(-ENOMEM);
963
964 /* First, sum counters... */
965 write_lock_bh(&table->lock);
966 get_counters(private, counters);
967 write_unlock_bh(&table->lock);
968
969 return counters;
970}
971
/*
 * Copy the table's entries (plus a fresh counter snapshot) back to
 * userspace for ip6tables -L.  The raw blob is copied first, then two
 * fixups are written over it in place: live counters at each entry's
 * counters offset, and user-visible match/target names (the kernel blob
 * holds kernel pointers in those unions).  Returns 0 or -EFAULT.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied-out (stale) counters with the
		 * snapshot taken under the lock. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Walk this entry's matches and write each one's
		 * user-visible name over the kernel-pointer union. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ip6t_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1045
1046#ifdef CONFIG_COMPAT
1047static void compat_standard_from_user(void *dst, void *src)
1048{
1049 int v = *(compat_int_t *)src;
1050
1051 if (v > 0)
1052 v += xt_compat_calc_jump(AF_INET6, v);
1053 memcpy(dst, &v, sizeof(v));
1054}
1055
1056static int compat_standard_to_user(void __user *dst, void *src)
1057{
1058 compat_int_t cv = *(int *)src;
1059
1060 if (cv > 0)
1061 cv -= xt_compat_calc_jump(AF_INET6, cv);
1062 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1063}
1064
1065static inline int
1066compat_calc_match(struct ip6t_entry_match *m, int *size)
1067{
1068 *size += xt_compat_match_offset(m->u.kernel.match);
1069 return 0;
1070}
1071
/*
 * For one entry, work out how much smaller its compat (32-bit)
 * representation is — entry header delta plus every match's and the
 * target's delta — shrink newinfo->size accordingly, record the
 * per-entry offset for later jump fixups, and pull down any
 * hook_entry/underflow offsets that lie beyond this entry.
 */
static int compat_calc_entry(struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ip6t_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	/* Remember this entry's shrink so jumps can be re-based later. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1100
1101static int compat_table_info(const struct xt_table_info *info,
1102 struct xt_table_info *newinfo)
1103{
1104 void *loc_cpu_entry;
1105
1106 if (!newinfo || !info)
1107 return -EINVAL;
1108
1109 /* we dont care about newinfo->entries[] */
1110 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1111 newinfo->initial_entries = 0;
1112 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1113 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1114 compat_calc_entry, info, loc_cpu_entry,
1115 newinfo);
1116}
1117#endif
1118
/*
 * IP6T_SO_GET_INFO handler: look up the named table (auto-loading
 * ip6table_<name> if needed) and copy its hook offsets, entry count and
 * size to userspace.  In compat mode the sizes/offsets reported are the
 * 32-bit layout computed by compat_table_info().
 */
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[IP6T_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Userspace string: force termination before using it. */
	name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			/* NOTE(review): compat_table_info()'s return value
			 * is assigned but never checked here — an error
			 * would be silently overwritten below; confirm
			 * whether a bail-out is needed. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1177
/*
 * IP6T_SO_GET_ENTRIES handler: validate the userspace request size
 * against the live table, then dump all entries (with counters and
 * user-visible names) via copy_entries_to_user().  Returns -EAGAIN when
 * the caller's size is stale, prompting userspace to retry with
 * get_info() first.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be header plus exactly the declared size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1216
/* Common tail of do_replace()/compat_do_replace(): atomically swap the
 * already-translated @newinfo into the named table, hand the final
 * counter values of the old ruleset back to userspace at @counters_ptr,
 * and release the old table blob.  On success the table lock is the
 * only thing left held by the caller's module reference.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;

	ret = 0;
	/* num_counters * sizeof() overflow is screened by the callers. */
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules:
	 * the table module holds one extra reference while it has more
	 * rules than its built-in initial set, so the swap may need to
	 * drop the find_table_lock ref and/or that extra ref. */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			   NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1287
1288static int
1289do_replace(struct net *net, void __user *user, unsigned int len)
1290{
1291 int ret;
1292 struct ip6t_replace tmp;
1293 struct xt_table_info *newinfo;
1294 void *loc_cpu_entry;
1295
1296 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1297 return -EFAULT;
1298
1299 /* overflow check */
1300 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1301 return -ENOMEM;
1302
1303 newinfo = xt_alloc_table_info(tmp.size);
1304 if (!newinfo)
1305 return -ENOMEM;
1306
1307 /* choose the copy that is on our node/cpu */
1308 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1309 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1310 tmp.size) != 0) {
1311 ret = -EFAULT;
1312 goto free_newinfo;
1313 }
1314
1315 ret = translate_table(tmp.name, tmp.valid_hooks,
1316 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1317 tmp.hook_entry, tmp.underflow);
1318 if (ret != 0)
1319 goto free_newinfo;
1320
1321 duprintf("ip_tables: Translated table\n");
1322
1323 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1324 tmp.num_counters, tmp.counters);
1325 if (ret)
1326 goto free_newinfo_untrans;
1327 return 0;
1328
1329 free_newinfo_untrans:
1330 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1331 free_newinfo:
1332 xt_free_table_info(newinfo);
1333 return ret;
1334}
1335
1336/* We're lazy, and add to the first CPU; overflow works its fey magic
1337 * and everything is OK. */
1338static inline int
1339add_counter_to_entry(struct ip6t_entry *e,
1340 const struct xt_counters addme[],
1341 unsigned int *i)
1342{
1343#if 0
1344 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1345 *i,
1346 (long unsigned int)e->counters.pcnt,
1347 (long unsigned int)e->counters.bcnt,
1348 (long unsigned int)addme[*i].pcnt,
1349 (long unsigned int)addme[*i].bcnt);
1350#endif
1351
1352 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1353
1354 (*i)++;
1355 return 0;
1356}
1357
/* IP6T_SO_SET_ADD_COUNTERS handler (native and compat): add a vector
 * of per-rule byte/packet deltas supplied by userspace onto the live
 * counters of the named table.  Counter updates go only into the
 * current CPU's copy of the ruleset (see add_counter_to_entry).
 */
static int
do_add_counters(struct net *net, void __user *user, unsigned int len,
		int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	const void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	/* The header struct differs in layout between 32-bit compat and
	 * native callers; pick the right one to copy in. */
	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	/* The counter payload itself has identical layout in both ABIs. */
	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET6, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* Exclude packet processing while we touch the counters. */
	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry,
			   private->size,
			   add_counter_to_entry,
			   paddc,
			   &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1442
1443#ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: same fields, but the
 * counters pointer is a compat_uptr_t and the trailing rule blob uses
 * compat entry layout. */
struct compat_ip6t_replace {
	char name[IP6T_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters; /* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];
};
1455
/* Iterator callback for compat_copy_entries_to_user(): convert one
 * kernel ip6t_entry (plus its matches/target) to compat layout at
 * *dstptr, shrinking *size by the native-vs-compat delta and fixing up
 * the entry's target_offset/next_offset to compat values.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Copy the fixed header; offsets are patched below once the
	 * compat sizes of matches/target are known. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
		goto out;

	/* Report the freshly snapshotted counters, not the live ones. */
	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	/* origsize - *size == total shrinkage so far for this entry. */
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
1499
/* Iterator callback for check_compat_entry_size_and_hooks(): look up
 * (and module-load if needed) the match named in @m, record it in
 * m->u.kernel.match, and accumulate into *size the extra room the
 * native representation needs over the compat one.  Increments *i so
 * the caller can unwind exactly the matches already resolved.
 */
static int
compat_find_calc_match(struct ip6t_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
						      m->u.user.revision),
					"ip6t_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
1523
1524static int
1525compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1526{
1527 if (i && (*i)-- == 0)
1528 return 1;
1529
1530 module_put(m->u.kernel.match->me);
1531 return 0;
1532}
1533
/* Release the match and target module references held by one fully
 * checked compat entry.  @i, when non-NULL, limits how many entries
 * are unwound (returns 1 to stop the iteration once exhausted).
 */
static int
compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
{
	struct ip6t_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
1548
1549static int
1550check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1551 struct xt_table_info *newinfo,
1552 unsigned int *size,
1553 unsigned char *base,
1554 unsigned char *limit,
1555 unsigned int *hook_entries,
1556 unsigned int *underflows,
1557 unsigned int *i,
1558 const char *name)
1559{
1560 struct ip6t_entry_target *t;
1561 struct xt_target *target;
1562 unsigned int entry_offset;
1563 unsigned int j;
1564 int ret, off, h;
1565
1566 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1567 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
1568 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1569 duprintf("Bad offset %p, limit = %p\n", e, limit);
1570 return -EINVAL;
1571 }
1572
1573 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1574 sizeof(struct compat_xt_entry_target)) {
1575 duprintf("checking: element %p size %u\n",
1576 e, e->next_offset);
1577 return -EINVAL;
1578 }
1579
1580 /* For purposes of check_entry casting the compat entry is fine */
1581 ret = check_entry((struct ip6t_entry *)e, name);
1582 if (ret)
1583 return ret;
1584
1585 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1586 entry_offset = (void *)e - (void *)base;
1587 j = 0;
1588 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1589 &e->ipv6, e->comefrom, &off, &j);
1590 if (ret != 0)
1591 goto release_matches;
1592
1593 t = compat_ip6t_get_target(e);
1594 target = try_then_request_module(xt_find_target(AF_INET6,
1595 t->u.user.name,
1596 t->u.user.revision),
1597 "ip6t_%s", t->u.user.name);
1598 if (IS_ERR(target) || !target) {
1599 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1600 t->u.user.name);
1601 ret = target ? PTR_ERR(target) : -ENOENT;
1602 goto release_matches;
1603 }
1604 t->u.kernel.target = target;
1605
1606 off += xt_compat_target_offset(target);
1607 *size += off;
1608 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1609 if (ret)
1610 goto out;
1611
1612 /* Check hooks & underflows */
1613 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1614 if ((unsigned char *)e - base == hook_entries[h])
1615 newinfo->hook_entry[h] = hook_entries[h];
1616 if ((unsigned char *)e - base == underflows[h])
1617 newinfo->underflow[h] = underflows[h];
1618 }
1619
1620 /* Clear counters and comefrom */
1621 memset(&e->counters, 0, sizeof(e->counters));
1622 e->comefrom = 0;
1623
1624 (*i)++;
1625 return 0;
1626
1627out:
1628 module_put(t->u.kernel.target->me);
1629release_matches:
1630 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1631 return ret;
1632}
1633
/* Second pass of translate_compat_table(): expand one validated compat
 * entry into native ip6t_entry layout at *dstptr, growing *size by the
 * native-vs-compat delta and shifting any hook entry/underflow offsets
 * that lie beyond this entry accordingly.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
					dstptr, size);
	if (ret)
		return ret;
	/* origsize - *size is negative growth: offsets move right. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Any hook/underflow offset past this entry shifts by the same
	 * amount the entry grew. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1672
/* Final pass of translate_compat_table(): run the normal match/target
 * semantic checks on an entry that has already been converted to
 * native layout.  On failure the matches checked so far (counted in j)
 * are cleaned up before returning.
 */
static int compat_check_entry(struct ip6t_entry *e, const char *name,
			      unsigned int *i)
{
	unsigned int j;
	int ret;
	struct xt_mtchk_param mtpar;

	j = 0;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
1699
/* Convert a whole ruleset supplied by 32-bit userspace (at *pentry0,
 * inside *pinfo) into native layout.  Three passes:
 *   1. check_compat_entry_size_and_hooks — bounds, modules, size deltas;
 *   2. compat_copy_entry_from_user — expand into a freshly allocated
 *      native-sized xt_table_info;
 *   3. compat_check_entry — normal semantic checks on the result.
 * On success *pinfo/*pentry0 are replaced with the native table and the
 * compat one is freed; on failure all module references are unwound.
 */
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	/* The compat offset table is per-family global state; hold the
	 * compat lock across passes 1 and 2. */
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					check_compat_entry_size_and_hooks,
					info, &size, entry0,
					entry0 + total_size,
					hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size now includes the native-layout growth from pass 1. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					compat_copy_entry_from_user,
					&pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				 name, &i);
	if (ret) {
		/* Entries [0,i) passed compat_check_entry and are cleaned
		 * via the native path; the remaining j entries still only
		 * hold the refs taken in pass 1 and are released via the
		 * compat path. */
		j -= i;
		COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						   compat_release_entry, &j);
		IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1820
/* Compat IP6T_SO_SET_REPLACE handler: like do_replace(), but the
 * incoming header and rule blob use 32-bit layout and are converted by
 * translate_compat_table() before the swap.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* May replace newinfo/loc_cpu_entry with a larger, native-layout
	 * table; the compat one is freed inside on success. */
	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1871
1872static int
1873compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1874 unsigned int len)
1875{
1876 int ret;
1877
1878 if (!capable(CAP_NET_ADMIN))
1879 return -EPERM;
1880
1881 switch (cmd) {
1882 case IP6T_SO_SET_REPLACE:
1883 ret = compat_do_replace(sock_net(sk), user, len);
1884 break;
1885
1886 case IP6T_SO_SET_ADD_COUNTERS:
1887 ret = do_add_counters(sock_net(sk), user, len, 1);
1888 break;
1889
1890 default:
1891 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1892 ret = -EINVAL;
1893 }
1894
1895 return ret;
1896}
1897
/* 32-bit userspace layout of struct ip6t_get_entries: request header
 * followed by a blob of compat-layout rule entries. */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1903
/* Copy the table's ruleset to 32-bit userspace, converting each entry
 * to compat layout via compat_copy_entry_to_user and pairing it with a
 * counter snapshot from alloc_counters().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
				 compat_copy_entry_to_user,
				 &pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
1934
/* Compat IP6T_SO_GET_ENTRIES handler: validate the request length
 * against the compat-adjusted table size (computed via
 * compat_table_info) and copy the converted ruleset out.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	/* compat_table_info() records per-entry offsets in global compat
	 * state; serialize with other compat users of this family. */
	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size changed since GET_INFO; caller must retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1981
1982static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1983
1984static int
1985compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1986{
1987 int ret;
1988
1989 if (!capable(CAP_NET_ADMIN))
1990 return -EPERM;
1991
1992 switch (cmd) {
1993 case IP6T_SO_GET_INFO:
1994 ret = get_info(sock_net(sk), user, len, 1);
1995 break;
1996 case IP6T_SO_GET_ENTRIES:
1997 ret = compat_get_entries(sock_net(sk), user, len);
1998 break;
1999 default:
2000 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2001 }
2002 return ret;
2003}
2004#endif
2005
2006static int
2007do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2008{
2009 int ret;
2010
2011 if (!capable(CAP_NET_ADMIN))
2012 return -EPERM;
2013
2014 switch (cmd) {
2015 case IP6T_SO_SET_REPLACE:
2016 ret = do_replace(sock_net(sk), user, len);
2017 break;
2018
2019 case IP6T_SO_SET_ADD_COUNTERS:
2020 ret = do_add_counters(sock_net(sk), user, len, 0);
2021 break;
2022
2023 default:
2024 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2025 ret = -EINVAL;
2026 }
2027
2028 return ret;
2029}
2030
/* Native getsockopt() entry point: table info/entries queries plus
 * match/target revision probing; requires CAP_NET_ADMIN.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct ip6t_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() writes the answer into ret; the
		 * module autoload is retried on -ENOENT. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2081
/* Register an ip6tables table for @net from the template ruleset in
 * @repl (used by ip6table_filter/mangle/raw at init).  Returns the
 * registered xt_table or an ERR_PTR on failure.
 */
struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Placeholder table info; xt_register_table() swaps in newinfo. */
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2122
/* Tear down a table registered with ip6t_register_table(): unregister
 * it, release the per-rule match/target module references, drop the
 * extra table-module reference held while user rules were loaded, and
 * free the rule blob.
 */
void ip6t_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	/* table->me may be unreachable after xt_unregister_table(). */
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2138
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code && code <= max_code;

	return hit ^ invert;
}
2148
/* Match callback for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the rule's configured range, honouring the
 * IP6T_ICMP_INV inversion flag.  Non-first fragments never match;
 * truncated headers hot-drop the packet.
 */
static bool
icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2176
2177/* Called when user tries to insert an entry of this type. */
2178static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2179{
2180 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2181
2182 /* Must specify no unknown invflags */
2183 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2184}
2185
/* The built-in targets: standard (NULL) and error. */
/* The standard target has no .target hook: its verdict (an int in the
 * target data) is interpreted inline by ip6t_do_table(). */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name		= IP6T_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET6,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
2197
/* The error target marks the table's terminating/error entries. */
static struct xt_target ip6t_error_target __read_mostly = {
	.name		= IP6T_ERROR_TARGET,
	.target		= ip6t_error,
	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
	.family		= AF_INET6,
};
2204
/* set/getsockopt registration: routes the IP6T_SO_* option range on
 * PF_INET6 sockets to the handlers above (with compat variants). */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2221
/* Built-in "icmp6" match registration. */
static struct xt_match icmp6_matchstruct __read_mostly = {
	.name		= "icmp6",
	.match		= icmp6_match,
	.matchsize	= sizeof(struct ip6t_icmp),
	.checkentry	= icmp6_checkentry,
	.proto		= IPPROTO_ICMPV6,
	.family		= AF_INET6,
};
2230
/* Per-netns init: set up the AF_INET6 xtables state (proc entries). */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET6);
}
2235
/* Per-netns teardown: release the AF_INET6 xtables state. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET6);
}
2240
/* Network-namespace lifecycle hooks for this module. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2245
/* Module init: register pernet ops, the built-in targets/match, and
 * the sockopt interface.  The error labels unwind in exact reverse
 * registration order.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2284
/* Module exit: unregister everything in reverse order of init. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);

	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2295
2296/*
2297 * find the offset to specified header or the protocol number of last header
2298 * if target < 0. "last header" is transport protocol header, ESP, or
2299 * "No next header".
2300 *
2301 * If target header is found, its offset is set in *offset and return protocol
2302 * number. Otherwise, return -1.
2303 *
2304 * If the first fragment doesn't contain the final protocol header or
2305 * NEXTHDR_NONE it is considered invalid.
2306 *
2307 * Note that non-1st fragment is special case that "the protocol number
2308 * of last header" is "next header" field in Fragment header. In this case,
2309 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2310 * isn't NULL.
2311 *
2312 */
/* See the block comment above for the full contract: walk the IPv6
 * extension-header chain looking for protocol @target (or the last
 * header when target < 0). */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		/* Chain ended before reaching @target. */
		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			/* Mask off the M flag and reserved bits. */
			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				/* Non-first fragment: only a "last header"
				 * query can be answered, via the fragment
				 * header's next-header field. */
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			/* AH length is in 32-bit words, unlike other
			 * extension headers. */
			hdrlen = (hp->hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
2372
2373EXPORT_SYMBOL(ip6t_register_table);
2374EXPORT_SYMBOL(ip6t_unregister_table);
2375EXPORT_SYMBOL(ip6t_do_table);
2376EXPORT_SYMBOL(ip6t_ext_hdr);
2377EXPORT_SYMBOL(ipv6_find_hdr);
2378
2379module_init(ip6_tables_init);
2380module_exit(ip6_tables_fini);
This page took 0.039826 seconds and 5 git commands to generate.