netfilter: x_tables: speedup compat operations
[deliverable/linux.git] / net / ipv4 / netfilter / ip_tables.c
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
90e7d4ab 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1da177e4 12#include <linux/cache.h>
4fc268d2 13#include <linux/capability.h>
14#include <linux/skbuff.h>
15#include <linux/kmod.h>
16#include <linux/vmalloc.h>
17#include <linux/netdevice.h>
18#include <linux/module.h>
19#include <linux/icmp.h>
20#include <net/ip.h>
2722971c 21#include <net/compat.h>
1da177e4 22#include <asm/uaccess.h>
57b47a53 23#include <linux/mutex.h>
24#include <linux/proc_fs.h>
25#include <linux/err.h>
c8923c6b 26#include <linux/cpumask.h>
1da177e4 27
2e4e6a17 28#include <linux/netfilter/x_tables.h>
1da177e4 29#include <linux/netfilter_ipv4/ip_tables.h>
f01ffbd6 30#include <net/netfilter/nf_log.h>
e3eaa991 31#include "../../netfilter/xt_repldata.h"
32
33MODULE_LICENSE("GPL");
34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35MODULE_DESCRIPTION("IPv4 packet filter");
36
37/*#define DEBUG_IP_FIREWALL*/
38/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39/*#define DEBUG_IP_FIREWALL_USER*/
40
41#ifdef DEBUG_IP_FIREWALL
ff67e4e4 42#define dprintf(format, args...) pr_info(format , ## args)
43#else
44#define dprintf(format, args...)
45#endif
46
47#ifdef DEBUG_IP_FIREWALL_USER
ff67e4e4 48#define duprintf(format, args...) pr_info(format , ## args)
49#else
50#define duprintf(format, args...)
51#endif
52
53#ifdef CONFIG_NETFILTER_DEBUG
af567603 54#define IP_NF_ASSERT(x) WARN_ON(!(x))
55#else
56#define IP_NF_ASSERT(x)
57#endif
58
59#if 0
60/* All the better to debug you with... */
61#define static
62#define inline
63#endif
64
65void *ipt_alloc_initial_table(const struct xt_table *info)
66{
67 return xt_alloc_initial_table(ipt, IPT);
68}
69EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
70
71/*
72 We keep a set of rules for each CPU, so we can avoid write-locking
73 them in the softirq when updating the counters and therefore
74 only need to read-lock in the softirq; doing a write_lock_bh() in user
75 context stops packets coming through and allows user context to read
76 the counters or update the rules.
77
78 Hence the start of any table is given by get_table() below. */
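/* Illustrative sketch (not part of the original file): the scheme described
 * above, using only primitives that appear later in this file, roughly is:
 *
 *   packet path (softirq), ipt_do_table():
 *       xt_info_rdlock_bh();        // per-CPU read lock
 *       ...walk this CPU's copy of the rules, bump e->counters...
 *       xt_info_rdunlock_bh();
 *
 *   user context, get_counters() / __do_replace():
 *       xt_info_wrlock(cpu);        // taken CPU by CPU for a stable snapshot
 *       ...sum or swap that CPU's copy...
 *       xt_info_wrunlock(cpu);
 */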
79
1da177e4 80/* Returns whether matches rule or not. */
022748a9 81/* Performance critical - called for every packet */
9c547959 82static inline bool
83ip_packet_match(const struct iphdr *ip,
84 const char *indev,
85 const char *outdev,
86 const struct ipt_ip *ipinfo,
87 int isfrag)
88{
89 unsigned long ret;
90
e79ec50b 91#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
92
93 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
94 IPT_INV_SRCIP) ||
95 FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
96 IPT_INV_DSTIP)) {
97 dprintf("Source or dest mismatch.\n");
98
99 dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
100 &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
1da177e4 101 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
102 dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
103 &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
1da177e4 104 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
9c547959 105 return false;
106 }
107
b8dfe498 108 ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
109
110 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
111 dprintf("VIA in mismatch (%s vs %s).%s\n",
112 indev, ipinfo->iniface,
113 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
9c547959 114 return false;
115 }
116
b8dfe498 117 ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
118
119 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
120 dprintf("VIA out mismatch (%s vs %s).%s\n",
121 outdev, ipinfo->outiface,
122 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
9c547959 123 return false;
124 }
125
126 /* Check specific protocol */
127 if (ipinfo->proto &&
128 FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
129 dprintf("Packet protocol %hi does not match %hi.%s\n",
130 ip->protocol, ipinfo->proto,
131 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
9c547959 132 return false;
133 }
134
135 /* If we have a fragment rule but the packet is not a fragment
136 * then we return zero */
137 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
138 dprintf("Fragment rule but not fragment.%s\n",
139 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
9c547959 140 return false;
141 }
142
9c547959 143 return true;
144}
145
022748a9 146static bool
147ip_checkentry(const struct ipt_ip *ip)
148{
149 if (ip->flags & ~IPT_F_MASK) {
150 duprintf("Unknown flag bits set: %08X\n",
151 ip->flags & ~IPT_F_MASK);
ccb79bdc 152 return false;
153 }
154 if (ip->invflags & ~IPT_INV_MASK) {
155 duprintf("Unknown invflag bits set: %08X\n",
156 ip->invflags & ~IPT_INV_MASK);
ccb79bdc 157 return false;
1da177e4 158 }
ccb79bdc 159 return true;
160}
161
162static unsigned int
4b560b44 163ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
164{
165 if (net_ratelimit())
ff67e4e4 166 pr_info("error: `%s'\n", (const char *)par->targinfo);
167
168 return NF_DROP;
169}
170
022748a9 171/* Performance critical */
1da177e4 172static inline struct ipt_entry *
d5d1baa1 173get_entry(const void *base, unsigned int offset)
174{
175 return (struct ipt_entry *)(base + offset);
176}
177
ba9dda3a 178/* All zeroes == unconditional rule. */
022748a9 179/* Mildly perf critical (only if packet tracing is on) */
47901dc2 180static inline bool unconditional(const struct ipt_ip *ip)
ba9dda3a 181{
47901dc2 182 static const struct ipt_ip uncond;
ba9dda3a 183
47901dc2 184 return memcmp(ip, &uncond, sizeof(uncond)) == 0;
e79ec50b 185#undef FWINV
186}
187
d5d1baa1 188/* for const-correctness */
87a2e70d 189static inline const struct xt_entry_target *
190ipt_get_target_c(const struct ipt_entry *e)
191{
192 return ipt_get_target((struct ipt_entry *)e);
193}
194
195#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
196 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
022748a9 197static const char *const hooknames[] = {
6e23ae2a
PM
198 [NF_INET_PRE_ROUTING] = "PREROUTING",
199 [NF_INET_LOCAL_IN] = "INPUT",
9c547959 200 [NF_INET_FORWARD] = "FORWARD",
6e23ae2a
PM
201 [NF_INET_LOCAL_OUT] = "OUTPUT",
202 [NF_INET_POST_ROUTING] = "POSTROUTING",
ba9dda3a
JK
203};
204
205enum nf_ip_trace_comments {
206 NF_IP_TRACE_COMMENT_RULE,
207 NF_IP_TRACE_COMMENT_RETURN,
208 NF_IP_TRACE_COMMENT_POLICY,
209};
210
022748a9 211static const char *const comments[] = {
ba9dda3a
JK
212 [NF_IP_TRACE_COMMENT_RULE] = "rule",
213 [NF_IP_TRACE_COMMENT_RETURN] = "return",
214 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
215};
216
217static struct nf_loginfo trace_loginfo = {
218 .type = NF_LOG_TYPE_LOG,
219 .u = {
220 .log = {
221 .level = 4,
222 .logflags = NF_LOG_MASK,
223 },
224 },
225};
226
022748a9 227/* Mildly perf critical (only if packet tracing is on) */
ba9dda3a 228static inline int
d5d1baa1 229get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
4f2f6f23
JE
230 const char *hookname, const char **chainname,
231 const char **comment, unsigned int *rulenum)
ba9dda3a 232{
87a2e70d 233 const struct xt_standard_target *t = (void *)ipt_get_target_c(s);
ba9dda3a 234
243bf6e2 235 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
ba9dda3a
JK
236 /* Head of user chain: ERROR target with chainname */
237 *chainname = t->target.data;
238 (*rulenum) = 0;
239 } else if (s == e) {
240 (*rulenum)++;
241
3666ed1c
JP
242 if (s->target_offset == sizeof(struct ipt_entry) &&
243 strcmp(t->target.u.kernel.target->name,
243bf6e2 244 XT_STANDARD_TARGET) == 0 &&
3666ed1c
JP
245 t->verdict < 0 &&
246 unconditional(&s->ip)) {
ba9dda3a
JK
247 /* Tail of chains: STANDARD target (return/policy) */
248 *comment = *chainname == hookname
4f2f6f23
JE
249 ? comments[NF_IP_TRACE_COMMENT_POLICY]
250 : comments[NF_IP_TRACE_COMMENT_RETURN];
ba9dda3a
JK
251 }
252 return 1;
253 } else
254 (*rulenum)++;
255
256 return 0;
257}
258
d5d1baa1 259static void trace_packet(const struct sk_buff *skb,
ba9dda3a
JK
260 unsigned int hook,
261 const struct net_device *in,
262 const struct net_device *out,
ecb6f85e 263 const char *tablename,
d5d1baa1
JE
264 const struct xt_table_info *private,
265 const struct ipt_entry *e)
ba9dda3a 266{
d5d1baa1 267 const void *table_base;
5452e425 268 const struct ipt_entry *root;
4f2f6f23 269 const char *hookname, *chainname, *comment;
72b2b1dd 270 const struct ipt_entry *iter;
ba9dda3a
JK
271 unsigned int rulenum = 0;
272
ccf5bd8c 273 table_base = private->entries[smp_processor_id()];
ba9dda3a
JK
274 root = get_entry(table_base, private->hook_entry[hook]);
275
4f2f6f23
JE
276 hookname = chainname = hooknames[hook];
277 comment = comments[NF_IP_TRACE_COMMENT_RULE];
ba9dda3a 278
72b2b1dd
JE
279 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
280 if (get_chainname_rulenum(iter, e, hookname,
281 &chainname, &comment, &rulenum) != 0)
282 break;
ba9dda3a
JK
283
284 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
285 "TRACE: %s:%s:%s:%u ",
286 tablename, chainname, comment, rulenum);
287}
288#endif
289
98e86403
JE
290static inline __pure
291struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
292{
293 return (void *)entry + entry->next_offset;
294}
295
1da177e4
LT
296/* Returns one of the generic firewall policies, like NF_ACCEPT. */
297unsigned int
3db05fea 298ipt_do_table(struct sk_buff *skb,
1da177e4
LT
299 unsigned int hook,
300 const struct net_device *in,
301 const struct net_device *out,
e60a13e0 302 struct xt_table *table)
1da177e4
LT
303{
304 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
5452e425 305 const struct iphdr *ip;
1da177e4
LT
306 /* Initializing verdict to NF_DROP keeps gcc happy. */
307 unsigned int verdict = NF_DROP;
308 const char *indev, *outdev;
d5d1baa1 309 const void *table_base;
f3c5c1bf
JE
310 struct ipt_entry *e, **jumpstack;
311 unsigned int *stackptr, origptr, cpu;
d5d1baa1 312 const struct xt_table_info *private;
de74c169 313 struct xt_action_param acpar;
1da177e4
LT
314
315 /* Initialization */
3db05fea 316 ip = ip_hdr(skb);
317 indev = in ? in->name : nulldevname;
318 outdev = out ? out->name : nulldevname;
319 /* We handle fragments by dealing with the first fragment as
320 * if it was a normal packet. All other fragments are treated
321 * normally, except that they will NEVER match rules that ask
322 * things we don't know, ie. tcp syn flag or ports). If the
323 * rule is also a fragment-specific rule, non-fragments won't
324 * match it. */
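/* Illustrative example (not part of the original file): with the semantics
 * described above, a ruleset such as
 *
 *     iptables -A INPUT -p tcp --dport 22 -j ACCEPT
 *     iptables -A INPUT -f -j DROP
 *
 * can accept the first fragment of a TCP/22 packet (it carries the ports),
 * while later fragments cannot satisfy the port test and instead hit the
 * fragment-only (-f) rule. */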
325 acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
326 acpar.thoff = ip_hdrlen(skb);
b4ba2611 327 acpar.hotdrop = false;
328 acpar.in = in;
329 acpar.out = out;
330 acpar.family = NFPROTO_IPV4;
331 acpar.hooknum = hook;
1da177e4 332
1da177e4 333 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
942e4a2b
SH
334 xt_info_rdlock_bh();
335 private = table->private;
f3c5c1bf
JE
336 cpu = smp_processor_id();
337 table_base = private->entries[cpu];
338 jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
7489aec8 339 stackptr = per_cpu_ptr(private->stackptr, cpu);
f3c5c1bf 340 origptr = *stackptr;
78454473 341
2e4e6a17 342 e = get_entry(table_base, private->hook_entry[hook]);
1da177e4 343
cecc74de 344 pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
f3c5c1bf
JE
345 table->name, hook, origptr,
346 get_entry(table_base, private->underflow[hook]));
1da177e4
LT
347
348 do {
87a2e70d 349 const struct xt_entry_target *t;
dcea992a 350 const struct xt_entry_match *ematch;
a1ff4ac8 351
1da177e4 352 IP_NF_ASSERT(e);
a1ff4ac8 353 if (!ip_packet_match(ip, indev, outdev,
de74c169 354 &e->ip, acpar.fragoff)) {
dcea992a 355 no_match:
a1ff4ac8
JE
356 e = ipt_next_entry(e);
357 continue;
358 }
1da177e4 359
ef53d702 360 xt_ematch_foreach(ematch, e) {
de74c169
JE
361 acpar.match = ematch->u.kernel.match;
362 acpar.matchinfo = ematch->data;
363 if (!acpar.match->match(skb, &acpar))
dcea992a 364 goto no_match;
ef53d702 365 }
dcea992a 366
7df0884c 367 ADD_COUNTER(e->counters, skb->len, 1);
1da177e4 368
a1ff4ac8
JE
369 t = ipt_get_target(e);
370 IP_NF_ASSERT(t->u.kernel.target);
ba9dda3a
JK
371
372#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
373 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
a1ff4ac8
JE
374 /* The packet is traced: log it */
375 if (unlikely(skb->nf_trace))
376 trace_packet(skb, hook, in, out,
377 table->name, private, e);
ba9dda3a 378#endif
a1ff4ac8
JE
379 /* Standard target? */
380 if (!t->u.kernel.target->target) {
381 int v;
382
87a2e70d 383 v = ((struct xt_standard_target *)t)->verdict;
a1ff4ac8
JE
384 if (v < 0) {
385 /* Pop from stack? */
243bf6e2 386 if (v != XT_RETURN) {
a1ff4ac8
JE
387 verdict = (unsigned)(-v) - 1;
388 break;
1da177e4 389 }
f3c5c1bf
JE
390 if (*stackptr == 0) {
391 e = get_entry(table_base,
392 private->underflow[hook]);
cecc74de 393 pr_debug("Underflow (this is normal) "
f3c5c1bf
JE
394 "to %p\n", e);
395 } else {
396 e = jumpstack[--*stackptr];
cecc74de 397 pr_debug("Pulled %p out from pos %u\n",
f3c5c1bf
JE
398 e, *stackptr);
399 e = ipt_next_entry(e);
400 }
a1ff4ac8
JE
401 continue;
402 }
3666ed1c
JP
403 if (table_base + v != ipt_next_entry(e) &&
404 !(e->ip.flags & IPT_F_GOTO)) {
f3c5c1bf
JE
405 if (*stackptr >= private->stacksize) {
406 verdict = NF_DROP;
407 break;
408 }
409 jumpstack[(*stackptr)++] = e;
cecc74de 410 pr_debug("Pushed %p into pos %u\n",
f3c5c1bf 411 e, *stackptr - 1);
a1ff4ac8 412 }
1da177e4 413
a1ff4ac8 414 e = get_entry(table_base, v);
7a6b1c46
JE
415 continue;
416 }
417
de74c169
JE
418 acpar.target = t->u.kernel.target;
419 acpar.targinfo = t->data;
bb70dfa5 420
de74c169 421 verdict = t->u.kernel.target->target(skb, &acpar);
7a6b1c46
JE
422 /* Target might have changed stuff. */
423 ip = ip_hdr(skb);
243bf6e2 424 if (verdict == XT_CONTINUE)
7a6b1c46
JE
425 e = ipt_next_entry(e);
426 else
427 /* Verdict */
428 break;
b4ba2611 429 } while (!acpar.hotdrop);
942e4a2b 430 xt_info_rdunlock_bh();
cecc74de 431 pr_debug("Exiting %s; resetting sp from %u to %u\n",
f3c5c1bf
JE
432 __func__, *stackptr, origptr);
433 *stackptr = origptr;
1da177e4
LT
434#ifdef DEBUG_ALLOW_ALL
435 return NF_ACCEPT;
436#else
b4ba2611 437 if (acpar.hotdrop)
438 return NF_DROP;
439 else return verdict;
440#endif
441}
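/* Illustrative sketch (not part of the original file): roughly how a table
 * module of this kernel generation invokes ipt_do_table() from a netfilter
 * hook.  The hook name, nf_hookfn signature and table pointer below are
 * assumptions for illustration and differ between kernel versions:
 *
 *   static unsigned int
 *   example_filter_hook(unsigned int hook, struct sk_buff *skb,
 *                       const struct net_device *in,
 *                       const struct net_device *out,
 *                       int (*okfn)(struct sk_buff *))
 *   {
 *           struct net *net = dev_net(in ? in : out);
 *
 *           return ipt_do_table(skb, hook, in, out,
 *                               net->ipv4.iptable_filter);
 *   }
 */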
442
443/* Figures out from what hook each rule can be called: returns 0 if
444 there are loops. Puts hook bitmask in comefrom. */
445static int
d5d1baa1 446mark_source_chains(const struct xt_table_info *newinfo,
31836064 447 unsigned int valid_hooks, void *entry0)
1da177e4
LT
448{
449 unsigned int hook;
450
451 /* No recursion; use packet counter to save back ptrs (reset
452 to 0 as we leave), and comefrom to save source hook bitmask */
6e23ae2a 453 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
1da177e4 454 unsigned int pos = newinfo->hook_entry[hook];
9c547959 455 struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
1da177e4
LT
456
457 if (!(valid_hooks & (1 << hook)))
458 continue;
459
460 /* Set initial back pointer. */
461 e->counters.pcnt = pos;
462
463 for (;;) {
87a2e70d 464 const struct xt_standard_target *t
d5d1baa1 465 = (void *)ipt_get_target_c(e);
e1b4b9f3 466 int visited = e->comefrom & (1 << hook);
1da177e4 467
6e23ae2a 468 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
654d0fbd 469 pr_err("iptables: loop hook %u pos %u %08X.\n",
1da177e4
LT
470 hook, pos, e->comefrom);
471 return 0;
472 }
9c547959 473 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
1da177e4
LT
474
475 /* Unconditional return/END. */
3666ed1c
JP
476 if ((e->target_offset == sizeof(struct ipt_entry) &&
477 (strcmp(t->target.u.user.name,
243bf6e2 478 XT_STANDARD_TARGET) == 0) &&
3666ed1c
JP
479 t->verdict < 0 && unconditional(&e->ip)) ||
480 visited) {
1da177e4
LT
481 unsigned int oldpos, size;
482
1f9352ae 483 if ((strcmp(t->target.u.user.name,
243bf6e2 484 XT_STANDARD_TARGET) == 0) &&
1f9352ae 485 t->verdict < -NF_MAX_VERDICT - 1) {
74c9c0c1
DM
486 duprintf("mark_source_chains: bad "
487 "negative verdict (%i)\n",
488 t->verdict);
489 return 0;
490 }
491
1da177e4
LT
492 /* Return: backtrack through the last
493 big jump. */
494 do {
6e23ae2a 495 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
1da177e4
LT
496#ifdef DEBUG_IP_FIREWALL_USER
497 if (e->comefrom
6e23ae2a 498 & (1 << NF_INET_NUMHOOKS)) {
1da177e4
LT
499 duprintf("Back unset "
500 "on hook %u "
501 "rule %u\n",
502 hook, pos);
503 }
504#endif
505 oldpos = pos;
506 pos = e->counters.pcnt;
507 e->counters.pcnt = 0;
508
509 /* We're at the start. */
510 if (pos == oldpos)
511 goto next;
512
513 e = (struct ipt_entry *)
31836064 514 (entry0 + pos);
1da177e4
LT
515 } while (oldpos == pos + e->next_offset);
516
517 /* Move along one */
518 size = e->next_offset;
519 e = (struct ipt_entry *)
31836064 520 (entry0 + pos + size);
1da177e4
LT
521 e->counters.pcnt = pos;
522 pos += size;
523 } else {
524 int newpos = t->verdict;
525
526 if (strcmp(t->target.u.user.name,
243bf6e2 527 XT_STANDARD_TARGET) == 0 &&
3666ed1c 528 newpos >= 0) {
74c9c0c1
DM
529 if (newpos > newinfo->size -
530 sizeof(struct ipt_entry)) {
531 duprintf("mark_source_chains: "
532 "bad verdict (%i)\n",
533 newpos);
534 return 0;
535 }
1da177e4
LT
536 /* This a jump; chase it. */
537 duprintf("Jump rule %u -> %u\n",
538 pos, newpos);
539 } else {
540 /* ... this is a fallthru */
541 newpos = pos + e->next_offset;
542 }
543 e = (struct ipt_entry *)
31836064 544 (entry0 + newpos);
545 e->counters.pcnt = pos;
546 pos = newpos;
547 }
548 }
549 next:
550 duprintf("Finished chain %u\n", hook);
551 }
552 return 1;
553}
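/* Illustrative note (not part of the original file): mark_source_chains()
 * walks every path reachable from each hook entry point.  Two fields are
 * reused as scratch space during the walk: e->counters.pcnt holds a back
 * pointer to the previous rule on the current path, and the extra
 * (1 << NF_INET_NUMHOOKS) bit in e->comefrom marks "still on the walk",
 * which is how loops are detected; translate_table() turns a zero return
 * into -ELOOP. */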
554
87a2e70d 555static void cleanup_match(struct xt_entry_match *m, struct net *net)
1da177e4 556{
6be3d859
JE
557 struct xt_mtdtor_param par;
558
f54e9367 559 par.net = net;
6be3d859
JE
560 par.match = m->u.kernel.match;
561 par.matchinfo = m->data;
916a917d 562 par.family = NFPROTO_IPV4;
6be3d859
JE
563 if (par.match->destroy != NULL)
564 par.match->destroy(&par);
565 module_put(par.match->me);
1da177e4
LT
566}
567
022748a9 568static int
d5d1baa1 569check_entry(const struct ipt_entry *e, const char *name)
a96be246 570{
87a2e70d 571 const struct xt_entry_target *t;
a96be246
DM
572
573 if (!ip_checkentry(&e->ip)) {
b5cad0df 574 duprintf("ip check failed %p %s.\n", e, par->match->name);
a96be246
DM
575 return -EINVAL;
576 }
577
87a2e70d 578 if (e->target_offset + sizeof(struct xt_entry_target) >
9c547959 579 e->next_offset)
a96be246
DM
580 return -EINVAL;
581
d5d1baa1 582 t = ipt_get_target_c(e);
a96be246
DM
583 if (e->target_offset + t->u.target_size > e->next_offset)
584 return -EINVAL;
585
586 return 0;
587}
588
022748a9 589static int
87a2e70d 590check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
a96be246 591{
9b4fce7a 592 const struct ipt_ip *ip = par->entryinfo;
a96be246
DM
593 int ret;
594
9b4fce7a
JE
595 par->match = m->u.kernel.match;
596 par->matchinfo = m->data;
597
916a917d 598 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
9b4fce7a 599 ip->proto, ip->invflags & IPT_INV_PROTO);
367c6790 600 if (ret < 0) {
b5cad0df 601 duprintf("check failed for `%s'.\n", par->match->name);
367c6790 602 return ret;
a96be246 603 }
367c6790 604 return 0;
a96be246
DM
605}
606
022748a9 607static int
87a2e70d 608find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
1da177e4 609{
6709dbbb 610 struct xt_match *match;
3cdc7c95 611 int ret;
1da177e4 612
fd0ec0e6
JE
613 match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
614 m->u.user.revision);
615 if (IS_ERR(match)) {
a96be246 616 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
fd0ec0e6 617 return PTR_ERR(match);
1da177e4
LT
618 }
619 m->u.kernel.match = match;
620
6bdb331b 621 ret = check_match(m, par);
3cdc7c95
PM
622 if (ret)
623 goto err;
624
1da177e4 625 return 0;
3cdc7c95
PM
626err:
627 module_put(m->u.kernel.match->me);
628 return ret;
1da177e4
LT
629}
630
add67461 631static int check_target(struct ipt_entry *e, struct net *net, const char *name)
a96be246 632{
87a2e70d 633 struct xt_entry_target *t = ipt_get_target(e);
af5d6dc2 634 struct xt_tgchk_param par = {
add67461 635 .net = net,
af5d6dc2
JE
636 .table = name,
637 .entryinfo = e,
638 .target = t->u.kernel.target,
639 .targinfo = t->data,
640 .hook_mask = e->comefrom,
916a917d 641 .family = NFPROTO_IPV4,
af5d6dc2 642 };
e905a9ed 643 int ret;
a96be246 644
916a917d 645 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
af5d6dc2 646 e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
367c6790 647 if (ret < 0) {
ff67e4e4 648 duprintf("check failed for `%s'.\n",
a96be246 649 t->u.kernel.target->name);
367c6790 650 return ret;
a96be246 651 }
367c6790 652 return 0;
a96be246 653}
1da177e4 654
022748a9 655static int
a83d8e8d 656find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
0559518b 657 unsigned int size)
1da177e4 658{
87a2e70d 659 struct xt_entry_target *t;
6709dbbb 660 struct xt_target *target;
1da177e4
LT
661 int ret;
662 unsigned int j;
9b4fce7a 663 struct xt_mtchk_param mtpar;
dcea992a 664 struct xt_entry_match *ematch;
1da177e4 665
a96be246
DM
666 ret = check_entry(e, name);
667 if (ret)
668 return ret;
590bdf7f 669
1da177e4 670 j = 0;
a83d8e8d 671 mtpar.net = net;
9b4fce7a
JE
672 mtpar.table = name;
673 mtpar.entryinfo = &e->ip;
674 mtpar.hook_mask = e->comefrom;
916a917d 675 mtpar.family = NFPROTO_IPV4;
dcea992a 676 xt_ematch_foreach(ematch, e) {
6bdb331b 677 ret = find_check_match(ematch, &mtpar);
dcea992a 678 if (ret != 0)
6bdb331b
JE
679 goto cleanup_matches;
680 ++j;
dcea992a 681 }
1da177e4
LT
682
683 t = ipt_get_target(e);
d2a7b6ba
JE
684 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
685 t->u.user.revision);
686 if (IS_ERR(target)) {
a96be246 687 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
d2a7b6ba 688 ret = PTR_ERR(target);
1da177e4
LT
689 goto cleanup_matches;
690 }
691 t->u.kernel.target = target;
692
add67461 693 ret = check_target(e, net, name);
3cdc7c95
PM
694 if (ret)
695 goto err;
1da177e4 696 return 0;
3cdc7c95
PM
697 err:
698 module_put(t->u.kernel.target->me);
1da177e4 699 cleanup_matches:
6bdb331b
JE
700 xt_ematch_foreach(ematch, e) {
701 if (j-- == 0)
dcea992a 702 break;
6bdb331b
JE
703 cleanup_match(ematch, net);
704 }
1da177e4
LT
705 return ret;
706}
707
d5d1baa1 708static bool check_underflow(const struct ipt_entry *e)
e2fe35c1 709{
87a2e70d 710 const struct xt_entry_target *t;
e2fe35c1
JE
711 unsigned int verdict;
712
713 if (!unconditional(&e->ip))
714 return false;
d5d1baa1 715 t = ipt_get_target_c(e);
e2fe35c1
JE
716 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
717 return false;
87a2e70d 718 verdict = ((struct xt_standard_target *)t)->verdict;
e2fe35c1
JE
719 verdict = -verdict - 1;
720 return verdict == NF_DROP || verdict == NF_ACCEPT;
721}
722
022748a9 723static int
1da177e4 724check_entry_size_and_hooks(struct ipt_entry *e,
2e4e6a17 725 struct xt_table_info *newinfo,
d5d1baa1
JE
726 const unsigned char *base,
727 const unsigned char *limit,
1da177e4
LT
728 const unsigned int *hook_entries,
729 const unsigned int *underflows,
0559518b 730 unsigned int valid_hooks)
1da177e4
LT
731{
732 unsigned int h;
733
3666ed1c
JP
734 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
735 (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
1da177e4
LT
736 duprintf("Bad offset %p\n", e);
737 return -EINVAL;
738 }
739
740 if (e->next_offset
87a2e70d 741 < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
1da177e4
LT
742 duprintf("checking: element %p size %u\n",
743 e, e->next_offset);
744 return -EINVAL;
745 }
746
747 /* Check hooks & underflows */
6e23ae2a 748 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
a7d51738
JE
749 if (!(valid_hooks & (1 << h)))
750 continue;
1da177e4
LT
751 if ((unsigned char *)e - base == hook_entries[h])
752 newinfo->hook_entry[h] = hook_entries[h];
90e7d4ab 753 if ((unsigned char *)e - base == underflows[h]) {
e2fe35c1
JE
754 if (!check_underflow(e)) {
755 pr_err("Underflows must be unconditional and "
756 "use the STANDARD target with "
757 "ACCEPT/DROP\n");
90e7d4ab
JE
758 return -EINVAL;
759 }
1da177e4 760 newinfo->underflow[h] = underflows[h];
90e7d4ab 761 }
1da177e4
LT
762 }
763
1da177e4 764 /* Clear counters and comefrom */
2e4e6a17 765 e->counters = ((struct xt_counters) { 0, 0 });
1da177e4 766 e->comefrom = 0;
1da177e4
LT
767 return 0;
768}
769
0559518b
JE
770static void
771cleanup_entry(struct ipt_entry *e, struct net *net)
1da177e4 772{
a2df1648 773 struct xt_tgdtor_param par;
87a2e70d 774 struct xt_entry_target *t;
dcea992a 775 struct xt_entry_match *ematch;
1da177e4 776
1da177e4 777 /* Cleanup all matches */
dcea992a 778 xt_ematch_foreach(ematch, e)
6bdb331b 779 cleanup_match(ematch, net);
1da177e4 780 t = ipt_get_target(e);
a2df1648 781
add67461 782 par.net = net;
a2df1648
JE
783 par.target = t->u.kernel.target;
784 par.targinfo = t->data;
916a917d 785 par.family = NFPROTO_IPV4;
a2df1648
JE
786 if (par.target->destroy != NULL)
787 par.target->destroy(&par);
788 module_put(par.target->me);
1da177e4
LT
789}
790
791/* Checks and translates the user-supplied table segment (held in
792 newinfo) */
793static int
0f234214
JE
794translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
795 const struct ipt_replace *repl)
1da177e4 796{
72b2b1dd 797 struct ipt_entry *iter;
1da177e4 798 unsigned int i;
72b2b1dd 799 int ret = 0;
1da177e4 800
0f234214
JE
801 newinfo->size = repl->size;
802 newinfo->number = repl->num_entries;
1da177e4
LT
803
804 /* Init all hooks to impossible value. */
6e23ae2a 805 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1da177e4
LT
806 newinfo->hook_entry[i] = 0xFFFFFFFF;
807 newinfo->underflow[i] = 0xFFFFFFFF;
808 }
809
810 duprintf("translate_table: size %u\n", newinfo->size);
811 i = 0;
812 /* Walk through entries, checking offsets. */
72b2b1dd
JE
813 xt_entry_foreach(iter, entry0, newinfo->size) {
814 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
6b4ff2d7
JE
815 entry0 + repl->size,
816 repl->hook_entry,
817 repl->underflow,
818 repl->valid_hooks);
72b2b1dd 819 if (ret != 0)
0559518b
JE
820 return ret;
821 ++i;
f3c5c1bf
JE
822 if (strcmp(ipt_get_target(iter)->u.user.name,
823 XT_ERROR_TARGET) == 0)
824 ++newinfo->stacksize;
72b2b1dd 825 }
1da177e4 826
0f234214 827 if (i != repl->num_entries) {
1da177e4 828 duprintf("translate_table: %u not %u entries\n",
0f234214 829 i, repl->num_entries);
1da177e4
LT
830 return -EINVAL;
831 }
832
833 /* Check hooks all assigned */
6e23ae2a 834 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1da177e4 835 /* Only hooks which are valid */
0f234214 836 if (!(repl->valid_hooks & (1 << i)))
1da177e4
LT
837 continue;
838 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
839 duprintf("Invalid hook entry %u %u\n",
0f234214 840 i, repl->hook_entry[i]);
1da177e4
LT
841 return -EINVAL;
842 }
843 if (newinfo->underflow[i] == 0xFFFFFFFF) {
844 duprintf("Invalid underflow %u %u\n",
0f234214 845 i, repl->underflow[i]);
1da177e4
LT
846 return -EINVAL;
847 }
848 }
849
0f234214 850 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
74c9c0c1
DM
851 return -ELOOP;
852
1da177e4
LT
853 /* Finally, each sanity check must pass */
854 i = 0;
72b2b1dd 855 xt_entry_foreach(iter, entry0, newinfo->size) {
0f234214 856 ret = find_check_entry(iter, net, repl->name, repl->size);
72b2b1dd
JE
857 if (ret != 0)
858 break;
0559518b 859 ++i;
72b2b1dd 860 }
1da177e4 861
74c9c0c1 862 if (ret != 0) {
0559518b
JE
863 xt_entry_foreach(iter, entry0, newinfo->size) {
864 if (i-- == 0)
72b2b1dd 865 break;
0559518b
JE
866 cleanup_entry(iter, net);
867 }
74c9c0c1
DM
868 return ret;
869 }
1da177e4
LT
870
871 /* And one copy for every other CPU */
6f912042 872 for_each_possible_cpu(i) {
31836064
ED
873 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
874 memcpy(newinfo->entries[i], entry0, newinfo->size);
1da177e4
LT
875 }
876
877 return ret;
878}
879
1da177e4 880static void
2e4e6a17
HW
881get_counters(const struct xt_table_info *t,
882 struct xt_counters counters[])
1da177e4 883{
72b2b1dd 884 struct ipt_entry *iter;
1da177e4
LT
885 unsigned int cpu;
886 unsigned int i;
24b36f01 887 unsigned int curcpu = get_cpu();
31836064
ED
888
889 /* Instead of clearing (by a previous call to memset())
890 * the counters and using adds, we set the counters
942e4a2b
SH
891 * with data used by 'current' CPU.
892 *
893 * Bottom half has to be disabled to prevent deadlock
894 * if new softirq were to run and call ipt_do_table
31836064 895 */
942e4a2b 896 local_bh_disable();
31836064 897 i = 0;
0559518b
JE
898 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
899 SET_COUNTER(counters[i], iter->counters.bcnt,
6b4ff2d7 900 iter->counters.pcnt);
0559518b
JE
901 ++i;
902 }
24b36f01
ED
903 local_bh_enable();
904 /* Processing counters from other cpus, we can let bottom half enabled,
905 * (preemption is disabled)
906 */
1da177e4 907
6f912042 908 for_each_possible_cpu(cpu) {
31836064
ED
909 if (cpu == curcpu)
910 continue;
1da177e4 911 i = 0;
001389b9 912 local_bh_disable();
942e4a2b 913 xt_info_wrlock(cpu);
0559518b
JE
914 xt_entry_foreach(iter, t->entries[cpu], t->size) {
915 ADD_COUNTER(counters[i], iter->counters.bcnt,
6b4ff2d7 916 iter->counters.pcnt);
0559518b
JE
917 ++i; /* macro does multi eval of i */
918 }
942e4a2b 919 xt_info_wrunlock(cpu);
001389b9 920 local_bh_enable();
1da177e4 921 }
24b36f01 922 put_cpu();
78454473
SH
923}
924
d5d1baa1 925static struct xt_counters *alloc_counters(const struct xt_table *table)
1da177e4 926{
2722971c 927 unsigned int countersize;
2e4e6a17 928 struct xt_counters *counters;
d5d1baa1 929 const struct xt_table_info *private = table->private;
1da177e4
LT
930
931 /* We need atomic snapshot of counters: rest doesn't change
932 (other than comefrom, which userspace doesn't care
933 about). */
2e4e6a17 934 countersize = sizeof(struct xt_counters) * private->number;
e12f8e29 935 counters = vmalloc(countersize);
1da177e4
LT
936
937 if (counters == NULL)
942e4a2b 938 return ERR_PTR(-ENOMEM);
78454473 939
942e4a2b 940 get_counters(private, counters);
1da177e4 941
2722971c
DM
942 return counters;
943}
944
945static int
946copy_entries_to_user(unsigned int total_size,
d5d1baa1 947 const struct xt_table *table,
2722971c
DM
948 void __user *userptr)
949{
950 unsigned int off, num;
d5d1baa1 951 const struct ipt_entry *e;
2722971c 952 struct xt_counters *counters;
5452e425 953 const struct xt_table_info *private = table->private;
2722971c 954 int ret = 0;
5452e425 955 const void *loc_cpu_entry;
2722971c
DM
956
957 counters = alloc_counters(table);
958 if (IS_ERR(counters))
959 return PTR_ERR(counters);
960
31836064
ED
961 /* choose the copy that is on our node/cpu, ...
962 * This choice is lazy (because current thread is
963 * allowed to migrate to another cpu)
964 */
2e4e6a17 965 loc_cpu_entry = private->entries[raw_smp_processor_id()];
31836064 966 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1da177e4
LT
967 ret = -EFAULT;
968 goto free_counters;
969 }
970
971 /* FIXME: use iterator macros --RR */
972 /* ... then go back and fix counters and names */
973 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
974 unsigned int i;
87a2e70d
JE
975 const struct xt_entry_match *m;
976 const struct xt_entry_target *t;
1da177e4 977
31836064 978 e = (struct ipt_entry *)(loc_cpu_entry + off);
1da177e4
LT
979 if (copy_to_user(userptr + off
980 + offsetof(struct ipt_entry, counters),
981 &counters[num],
982 sizeof(counters[num])) != 0) {
983 ret = -EFAULT;
984 goto free_counters;
985 }
986
987 for (i = sizeof(struct ipt_entry);
988 i < e->target_offset;
989 i += m->u.match_size) {
990 m = (void *)e + i;
991
992 if (copy_to_user(userptr + off + i
87a2e70d 993 + offsetof(struct xt_entry_match,
1da177e4
LT
994 u.user.name),
995 m->u.kernel.match->name,
996 strlen(m->u.kernel.match->name)+1)
997 != 0) {
998 ret = -EFAULT;
999 goto free_counters;
1000 }
1001 }
1002
d5d1baa1 1003 t = ipt_get_target_c(e);
1da177e4 1004 if (copy_to_user(userptr + off + e->target_offset
87a2e70d 1005 + offsetof(struct xt_entry_target,
1da177e4
LT
1006 u.user.name),
1007 t->u.kernel.target->name,
1008 strlen(t->u.kernel.target->name)+1) != 0) {
1009 ret = -EFAULT;
1010 goto free_counters;
1011 }
1012 }
1013
1014 free_counters:
1015 vfree(counters);
1016 return ret;
1017}
1018
2722971c 1019#ifdef CONFIG_COMPAT
739674fb 1020static void compat_standard_from_user(void *dst, const void *src)
2722971c 1021{
9fa492cd 1022 int v = *(compat_int_t *)src;
2722971c 1023
9fa492cd 1024 if (v > 0)
b386d9f5 1025 v += xt_compat_calc_jump(AF_INET, v);
9fa492cd
PM
1026 memcpy(dst, &v, sizeof(v));
1027}
46c5ea3c 1028
739674fb 1029static int compat_standard_to_user(void __user *dst, const void *src)
2722971c 1030{
9fa492cd 1031 compat_int_t cv = *(int *)src;
2722971c 1032
9fa492cd 1033 if (cv > 0)
b386d9f5 1034 cv -= xt_compat_calc_jump(AF_INET, cv);
9fa492cd 1035 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1036}
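/* Illustrative note (not part of the original file): positive standard-target
 * verdicts are jump offsets into the rule blob.  Compat (32-bit) entries are
 * smaller than native ones, so the same rule sits at different offsets in the
 * two layouts; xt_compat_calc_jump() returns the accumulated size delta at a
 * given offset, letting the two helpers above translate verdicts in either
 * direction. */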
1037
d5d1baa1 1038static int compat_calc_entry(const struct ipt_entry *e,
4b478248 1039 const struct xt_table_info *info,
d5d1baa1 1040 const void *base, struct xt_table_info *newinfo)
2722971c 1041{
dcea992a 1042 const struct xt_entry_match *ematch;
87a2e70d 1043 const struct xt_entry_target *t;
e5b5ef7d 1044 unsigned int entry_offset;
2722971c
DM
1045 int off, i, ret;
1046
30c08c41 1047 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
2722971c 1048 entry_offset = (void *)e - base;
dcea992a 1049 xt_ematch_foreach(ematch, e)
6bdb331b 1050 off += xt_compat_match_offset(ematch->u.kernel.match);
d5d1baa1 1051 t = ipt_get_target_c(e);
9fa492cd 1052 off += xt_compat_target_offset(t->u.kernel.target);
2722971c 1053 newinfo->size -= off;
b386d9f5 1054 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
2722971c
DM
1055 if (ret)
1056 return ret;
1057
6e23ae2a 1058 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
4b478248
PM
1059 if (info->hook_entry[i] &&
1060 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
2722971c 1061 newinfo->hook_entry[i] -= off;
4b478248
PM
1062 if (info->underflow[i] &&
1063 (e < (struct ipt_entry *)(base + info->underflow[i])))
2722971c
DM
1064 newinfo->underflow[i] -= off;
1065 }
1066 return 0;
1067}
1068
259d4e41 1069static int compat_table_info(const struct xt_table_info *info,
4b478248 1070 struct xt_table_info *newinfo)
2722971c 1071{
72b2b1dd 1072 struct ipt_entry *iter;
2722971c 1073 void *loc_cpu_entry;
0559518b 1074 int ret;
2722971c
DM
1075
1076 if (!newinfo || !info)
1077 return -EINVAL;
1078
259d4e41
ED
1079 /* we dont care about newinfo->entries[] */
1080 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1081 newinfo->initial_entries = 0;
2722971c 1082 loc_cpu_entry = info->entries[raw_smp_processor_id()];
255d0dc3 1083 xt_compat_init_offsets(AF_INET, info->number);
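/* Illustrative note (not part of the original file): per the commit subject,
 * xt_compat_init_offsets() passes the expected entry count to the xt_compat
 * offset bookkeeping so its offset table can be sized up front instead of
 * being grown one entry at a time, which speeds up compat get/replace on
 * large rulesets.  translate_compat_table() gives the same hint. */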
1084 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1085 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1086 if (ret != 0)
0559518b 1087 return ret;
72b2b1dd 1088 }
0559518b 1089 return 0;
2722971c
DM
1090}
1091#endif
1092
d5d1baa1
JE
1093static int get_info(struct net *net, void __user *user,
1094 const int *len, int compat)
2722971c 1095{
12b00c2c 1096 char name[XT_TABLE_MAXNAMELEN];
e60a13e0 1097 struct xt_table *t;
2722971c
DM
1098 int ret;
1099
1100 if (*len != sizeof(struct ipt_getinfo)) {
c9d8fe13
PM
1101 duprintf("length %u != %zu\n", *len,
1102 sizeof(struct ipt_getinfo));
2722971c
DM
1103 return -EINVAL;
1104 }
1105
1106 if (copy_from_user(name, user, sizeof(name)) != 0)
1107 return -EFAULT;
1108
12b00c2c 1109 name[XT_TABLE_MAXNAMELEN-1] = '\0';
2722971c
DM
1110#ifdef CONFIG_COMPAT
1111 if (compat)
1112 xt_compat_lock(AF_INET);
1113#endif
34bd137b 1114 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
4b478248 1115 "iptable_%s", name);
2722971c
DM
1116 if (t && !IS_ERR(t)) {
1117 struct ipt_getinfo info;
5452e425 1118 const struct xt_table_info *private = t->private;
2722971c 1119#ifdef CONFIG_COMPAT
14c7dbe0
AD
1120 struct xt_table_info tmp;
1121
2722971c 1122 if (compat) {
2722971c 1123 ret = compat_table_info(private, &tmp);
b386d9f5 1124 xt_compat_flush_offsets(AF_INET);
4b478248 1125 private = &tmp;
2722971c
DM
1126 }
1127#endif
b5f15ac4 1128 memset(&info, 0, sizeof(info));
2722971c
DM
1129 info.valid_hooks = t->valid_hooks;
1130 memcpy(info.hook_entry, private->hook_entry,
4b478248 1131 sizeof(info.hook_entry));
2722971c 1132 memcpy(info.underflow, private->underflow,
4b478248 1133 sizeof(info.underflow));
2722971c
DM
1134 info.num_entries = private->number;
1135 info.size = private->size;
1136 strcpy(info.name, name);
1137
1138 if (copy_to_user(user, &info, *len) != 0)
1139 ret = -EFAULT;
1140 else
1141 ret = 0;
1142
1143 xt_table_unlock(t);
1144 module_put(t->me);
1145 } else
1146 ret = t ? PTR_ERR(t) : -ENOENT;
1147#ifdef CONFIG_COMPAT
1148 if (compat)
1149 xt_compat_unlock(AF_INET);
1150#endif
1151 return ret;
1152}
1153
1154static int
d5d1baa1
JE
1155get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1156 const int *len)
2722971c
DM
1157{
1158 int ret;
1159 struct ipt_get_entries get;
e60a13e0 1160 struct xt_table *t;
2722971c
DM
1161
1162 if (*len < sizeof(get)) {
c9d8fe13 1163 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
2722971c
DM
1164 return -EINVAL;
1165 }
1166 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1167 return -EFAULT;
1168 if (*len != sizeof(struct ipt_get_entries) + get.size) {
c9d8fe13
PM
1169 duprintf("get_entries: %u != %zu\n",
1170 *len, sizeof(get) + get.size);
2722971c
DM
1171 return -EINVAL;
1172 }
1173
34bd137b 1174 t = xt_find_table_lock(net, AF_INET, get.name);
2722971c 1175 if (t && !IS_ERR(t)) {
5452e425 1176 const struct xt_table_info *private = t->private;
9c547959 1177 duprintf("t->private->number = %u\n", private->number);
2722971c
DM
1178 if (get.size == private->size)
1179 ret = copy_entries_to_user(private->size,
1180 t, uptr->entrytable);
1181 else {
1182 duprintf("get_entries: I've got %u not %u!\n",
9c547959 1183 private->size, get.size);
544473c1 1184 ret = -EAGAIN;
2722971c
DM
1185 }
1186 module_put(t->me);
1187 xt_table_unlock(t);
1188 } else
1189 ret = t ? PTR_ERR(t) : -ENOENT;
1190
1191 return ret;
1192}
1193
1194static int
34bd137b 1195__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
4b478248
PM
1196 struct xt_table_info *newinfo, unsigned int num_counters,
1197 void __user *counters_ptr)
2722971c
DM
1198{
1199 int ret;
e60a13e0 1200 struct xt_table *t;
2722971c
DM
1201 struct xt_table_info *oldinfo;
1202 struct xt_counters *counters;
1203 void *loc_cpu_old_entry;
72b2b1dd 1204 struct ipt_entry *iter;
2722971c
DM
1205
1206 ret = 0;
1207 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1208 if (!counters) {
1209 ret = -ENOMEM;
1210 goto out;
1211 }
1212
34bd137b 1213 t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
2722971c
DM
1214 "iptable_%s", name);
1215 if (!t || IS_ERR(t)) {
1216 ret = t ? PTR_ERR(t) : -ENOENT;
1217 goto free_newinfo_counters_untrans;
1218 }
1219
1220 /* You lied! */
1221 if (valid_hooks != t->valid_hooks) {
1222 duprintf("Valid hook crap: %08X vs %08X\n",
1223 valid_hooks, t->valid_hooks);
1224 ret = -EINVAL;
1225 goto put_module;
1226 }
1227
1228 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1229 if (!oldinfo)
1230 goto put_module;
1231
1232 /* Update module usage count based on number of rules */
1233 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1234 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1235 if ((oldinfo->number > oldinfo->initial_entries) ||
1236 (newinfo->number <= oldinfo->initial_entries))
1237 module_put(t->me);
1238 if ((oldinfo->number > oldinfo->initial_entries) &&
1239 (newinfo->number <= oldinfo->initial_entries))
1240 module_put(t->me);
1241
942e4a2b 1242 /* Get the old counters, and synchronize with replace */
2722971c 1243 get_counters(oldinfo, counters);
942e4a2b 1244
2722971c
DM
1245 /* Decrease module usage counts and free resource */
1246 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
72b2b1dd 1247 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
0559518b 1248 cleanup_entry(iter, net);
72b2b1dd 1249
2722971c
DM
1250 xt_free_table_info(oldinfo);
1251 if (copy_to_user(counters_ptr, counters,
1252 sizeof(struct xt_counters) * num_counters) != 0)
1253 ret = -EFAULT;
1254 vfree(counters);
1255 xt_table_unlock(t);
1256 return ret;
1257
1258 put_module:
1259 module_put(t->me);
1260 xt_table_unlock(t);
1261 free_newinfo_counters_untrans:
1262 vfree(counters);
1263 out:
1264 return ret;
1265}
1266
1267static int
d5d1baa1 1268do_replace(struct net *net, const void __user *user, unsigned int len)
2722971c
DM
1269{
1270 int ret;
1271 struct ipt_replace tmp;
1272 struct xt_table_info *newinfo;
1273 void *loc_cpu_entry;
72b2b1dd 1274 struct ipt_entry *iter;
2722971c
DM
1275
1276 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1277 return -EFAULT;
1278
2722971c 1279 /* overflow check */
2722971c
DM
1280 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1281 return -ENOMEM;
1282
1283 newinfo = xt_alloc_table_info(tmp.size);
1284 if (!newinfo)
1285 return -ENOMEM;
1286
9c547959 1287 /* choose the copy that is on our node/cpu */
2722971c
DM
1288 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1289 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1290 tmp.size) != 0) {
1291 ret = -EFAULT;
1292 goto free_newinfo;
1293 }
1294
0f234214 1295 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
2722971c
DM
1296 if (ret != 0)
1297 goto free_newinfo;
1298
ff67e4e4 1299 duprintf("Translated table\n");
2722971c 1300
34bd137b 1301 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
4b478248 1302 tmp.num_counters, tmp.counters);
2722971c
DM
1303 if (ret)
1304 goto free_newinfo_untrans;
1305 return 0;
1306
1307 free_newinfo_untrans:
72b2b1dd 1308 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
0559518b 1309 cleanup_entry(iter, net);
2722971c
DM
1310 free_newinfo:
1311 xt_free_table_info(newinfo);
1312 return ret;
1313}
1314
2722971c 1315static int
d5d1baa1
JE
1316do_add_counters(struct net *net, const void __user *user,
1317 unsigned int len, int compat)
2722971c 1318{
942e4a2b 1319 unsigned int i, curcpu;
2722971c
DM
1320 struct xt_counters_info tmp;
1321 struct xt_counters *paddc;
1322 unsigned int num_counters;
5452e425 1323 const char *name;
2722971c
DM
1324 int size;
1325 void *ptmp;
e60a13e0 1326 struct xt_table *t;
5452e425 1327 const struct xt_table_info *private;
2722971c
DM
1328 int ret = 0;
1329 void *loc_cpu_entry;
72b2b1dd 1330 struct ipt_entry *iter;
2722971c
DM
1331#ifdef CONFIG_COMPAT
1332 struct compat_xt_counters_info compat_tmp;
1333
1334 if (compat) {
1335 ptmp = &compat_tmp;
1336 size = sizeof(struct compat_xt_counters_info);
1337 } else
1338#endif
1339 {
1340 ptmp = &tmp;
1341 size = sizeof(struct xt_counters_info);
1342 }
1343
1344 if (copy_from_user(ptmp, user, size) != 0)
1345 return -EFAULT;
1346
1347#ifdef CONFIG_COMPAT
1348 if (compat) {
1349 num_counters = compat_tmp.num_counters;
1350 name = compat_tmp.name;
1351 } else
1352#endif
1353 {
1354 num_counters = tmp.num_counters;
1355 name = tmp.name;
1356 }
1357
1358 if (len != size + num_counters * sizeof(struct xt_counters))
1359 return -EINVAL;
1360
e12f8e29 1361 paddc = vmalloc(len - size);
2722971c
DM
1362 if (!paddc)
1363 return -ENOMEM;
1364
1365 if (copy_from_user(paddc, user + size, len - size) != 0) {
1366 ret = -EFAULT;
1367 goto free;
1368 }
1369
34bd137b 1370 t = xt_find_table_lock(net, AF_INET, name);
2722971c
DM
1371 if (!t || IS_ERR(t)) {
1372 ret = t ? PTR_ERR(t) : -ENOENT;
1373 goto free;
1374 }
1375
942e4a2b 1376 local_bh_disable();
2722971c
DM
1377 private = t->private;
1378 if (private->number != num_counters) {
1379 ret = -EINVAL;
1380 goto unlock_up_free;
1381 }
1382
1383 i = 0;
1384 /* Choose the copy that is on our node */
942e4a2b
SH
1385 curcpu = smp_processor_id();
1386 loc_cpu_entry = private->entries[curcpu];
1387 xt_info_wrlock(curcpu);
0559518b
JE
1388 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1389 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1390 ++i;
1391 }
942e4a2b 1392 xt_info_wrunlock(curcpu);
2722971c 1393 unlock_up_free:
942e4a2b 1394 local_bh_enable();
2722971c
DM
1395 xt_table_unlock(t);
1396 module_put(t->me);
1397 free:
1398 vfree(paddc);
1399
1400 return ret;
1401}
1402
1403#ifdef CONFIG_COMPAT
1404struct compat_ipt_replace {
12b00c2c 1405 char name[XT_TABLE_MAXNAMELEN];
2722971c
DM
1406 u32 valid_hooks;
1407 u32 num_entries;
1408 u32 size;
6e23ae2a
PM
1409 u32 hook_entry[NF_INET_NUMHOOKS];
1410 u32 underflow[NF_INET_NUMHOOKS];
2722971c 1411 u32 num_counters;
87a2e70d 1412 compat_uptr_t counters; /* struct xt_counters * */
2722971c
DM
1413 struct compat_ipt_entry entries[0];
1414};
1415
a18aa31b
PM
1416static int
1417compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
b0a6363c 1418 unsigned int *size, struct xt_counters *counters,
0559518b 1419 unsigned int i)
2722971c 1420{
87a2e70d 1421 struct xt_entry_target *t;
2722971c
DM
1422 struct compat_ipt_entry __user *ce;
1423 u_int16_t target_offset, next_offset;
1424 compat_uint_t origsize;
dcea992a
JE
1425 const struct xt_entry_match *ematch;
1426 int ret = 0;
2722971c 1427
2722971c
DM
1428 origsize = *size;
1429 ce = (struct compat_ipt_entry __user *)*dstptr;
0559518b
JE
1430 if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1431 copy_to_user(&ce->counters, &counters[i],
1432 sizeof(counters[i])) != 0)
1433 return -EFAULT;
a18aa31b 1434
2722971c 1435 *dstptr += sizeof(struct compat_ipt_entry);
30c08c41
PM
1436 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1437
dcea992a
JE
1438 xt_ematch_foreach(ematch, e) {
1439 ret = xt_compat_match_to_user(ematch, dstptr, size);
1440 if (ret != 0)
6bdb331b 1441 return ret;
dcea992a 1442 }
2722971c 1443 target_offset = e->target_offset - (origsize - *size);
2722971c 1444 t = ipt_get_target(e);
9fa492cd 1445 ret = xt_compat_target_to_user(t, dstptr, size);
2722971c 1446 if (ret)
0559518b 1447 return ret;
2722971c 1448 next_offset = e->next_offset - (origsize - *size);
0559518b
JE
1449 if (put_user(target_offset, &ce->target_offset) != 0 ||
1450 put_user(next_offset, &ce->next_offset) != 0)
1451 return -EFAULT;
2722971c 1452 return 0;
2722971c
DM
1453}
1454
022748a9 1455static int
87a2e70d 1456compat_find_calc_match(struct xt_entry_match *m,
4b478248
PM
1457 const char *name,
1458 const struct ipt_ip *ip,
1459 unsigned int hookmask,
6bdb331b 1460 int *size)
2722971c 1461{
6709dbbb 1462 struct xt_match *match;
2722971c 1463
fd0ec0e6
JE
1464 match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
1465 m->u.user.revision);
1466 if (IS_ERR(match)) {
2722971c 1467 duprintf("compat_check_calc_match: `%s' not found\n",
4b478248 1468 m->u.user.name);
fd0ec0e6 1469 return PTR_ERR(match);
2722971c
DM
1470 }
1471 m->u.kernel.match = match;
9fa492cd 1472 *size += xt_compat_match_offset(match);
4c1b52bc
DM
1473 return 0;
1474}
1475
0559518b 1476static void compat_release_entry(struct compat_ipt_entry *e)
4c1b52bc 1477{
87a2e70d 1478 struct xt_entry_target *t;
dcea992a 1479 struct xt_entry_match *ematch;
4c1b52bc 1480
4c1b52bc 1481 /* Cleanup all matches */
dcea992a 1482 xt_ematch_foreach(ematch, e)
6bdb331b 1483 module_put(ematch->u.kernel.match->me);
73cd598d 1484 t = compat_ipt_get_target(e);
4c1b52bc 1485 module_put(t->u.kernel.target->me);
4c1b52bc
DM
1486}
1487
022748a9 1488static int
73cd598d 1489check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
4b478248
PM
1490 struct xt_table_info *newinfo,
1491 unsigned int *size,
d5d1baa1
JE
1492 const unsigned char *base,
1493 const unsigned char *limit,
1494 const unsigned int *hook_entries,
1495 const unsigned int *underflows,
4b478248 1496 const char *name)
2722971c 1497{
dcea992a 1498 struct xt_entry_match *ematch;
87a2e70d 1499 struct xt_entry_target *t;
6709dbbb 1500 struct xt_target *target;
e5b5ef7d 1501 unsigned int entry_offset;
b0a6363c
PM
1502 unsigned int j;
1503 int ret, off, h;
2722971c
DM
1504
1505 duprintf("check_compat_entry_size_and_hooks %p\n", e);
3666ed1c
JP
1506 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
1507 (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
2722971c
DM
1508 duprintf("Bad offset %p, limit = %p\n", e, limit);
1509 return -EINVAL;
1510 }
1511
1512 if (e->next_offset < sizeof(struct compat_ipt_entry) +
4b478248 1513 sizeof(struct compat_xt_entry_target)) {
2722971c
DM
1514 duprintf("checking: element %p size %u\n",
1515 e, e->next_offset);
1516 return -EINVAL;
1517 }
1518
73cd598d
PM
1519 /* For purposes of check_entry casting the compat entry is fine */
1520 ret = check_entry((struct ipt_entry *)e, name);
a96be246
DM
1521 if (ret)
1522 return ret;
590bdf7f 1523
30c08c41 1524 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
2722971c
DM
1525 entry_offset = (void *)e - (void *)base;
1526 j = 0;
dcea992a
JE
1527 xt_ematch_foreach(ematch, e) {
1528 ret = compat_find_calc_match(ematch, name,
6b4ff2d7 1529 &e->ip, e->comefrom, &off);
dcea992a 1530 if (ret != 0)
6bdb331b
JE
1531 goto release_matches;
1532 ++j;
dcea992a 1533 }
2722971c 1534
73cd598d 1535 t = compat_ipt_get_target(e);
d2a7b6ba
JE
1536 target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
1537 t->u.user.revision);
1538 if (IS_ERR(target)) {
a96be246 1539 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
4b478248 1540 t->u.user.name);
d2a7b6ba 1541 ret = PTR_ERR(target);
4c1b52bc 1542 goto release_matches;
2722971c
DM
1543 }
1544 t->u.kernel.target = target;
1545
9fa492cd 1546 off += xt_compat_target_offset(target);
2722971c 1547 *size += off;
b386d9f5 1548 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
2722971c
DM
1549 if (ret)
1550 goto out;
1551
1552 /* Check hooks & underflows */
6e23ae2a 1553 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
2722971c
DM
1554 if ((unsigned char *)e - base == hook_entries[h])
1555 newinfo->hook_entry[h] = hook_entries[h];
1556 if ((unsigned char *)e - base == underflows[h])
1557 newinfo->underflow[h] = underflows[h];
1558 }
1559
1560 /* Clear counters and comefrom */
73cd598d 1561 memset(&e->counters, 0, sizeof(e->counters));
2722971c 1562 e->comefrom = 0;
2722971c 1563 return 0;
bec71b16 1564
2722971c 1565out:
bec71b16 1566 module_put(t->u.kernel.target->me);
4c1b52bc 1567release_matches:
6bdb331b
JE
1568 xt_ematch_foreach(ematch, e) {
1569 if (j-- == 0)
dcea992a 1570 break;
6bdb331b
JE
1571 module_put(ematch->u.kernel.match->me);
1572 }
2722971c
DM
1573 return ret;
1574}
1575
4b478248 1576static int
73cd598d 1577compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
4b478248
PM
1578 unsigned int *size, const char *name,
1579 struct xt_table_info *newinfo, unsigned char *base)
2722971c 1580{
87a2e70d 1581 struct xt_entry_target *t;
6709dbbb 1582 struct xt_target *target;
2722971c
DM
1583 struct ipt_entry *de;
1584 unsigned int origsize;
920b868a 1585 int ret, h;
dcea992a 1586 struct xt_entry_match *ematch;
2722971c
DM
1587
1588 ret = 0;
1589 origsize = *size;
1590 de = (struct ipt_entry *)*dstptr;
1591 memcpy(de, e, sizeof(struct ipt_entry));
73cd598d 1592 memcpy(&de->counters, &e->counters, sizeof(e->counters));
2722971c 1593
73cd598d 1594 *dstptr += sizeof(struct ipt_entry);
30c08c41
PM
1595 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1596
dcea992a
JE
1597 xt_ematch_foreach(ematch, e) {
1598 ret = xt_compat_match_from_user(ematch, dstptr, size);
1599 if (ret != 0)
6bdb331b 1600 return ret;
dcea992a 1601 }
2722971c 1602 de->target_offset = e->target_offset - (origsize - *size);
73cd598d 1603 t = compat_ipt_get_target(e);
2722971c 1604 target = t->u.kernel.target;
9fa492cd 1605 xt_compat_target_from_user(t, dstptr, size);
2722971c
DM
1606
1607 de->next_offset = e->next_offset - (origsize - *size);
6e23ae2a 1608 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
2722971c
DM
1609 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1610 newinfo->hook_entry[h] -= origsize - *size;
1611 if ((unsigned char *)de - base < newinfo->underflow[h])
1612 newinfo->underflow[h] -= origsize - *size;
1613 }
f6677f43
DM
1614 return ret;
1615}
1616
022748a9 1617static int
0559518b 1618compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
f6677f43 1619{
dcea992a 1620 struct xt_entry_match *ematch;
9b4fce7a 1621 struct xt_mtchk_param mtpar;
b0a6363c 1622 unsigned int j;
dcea992a 1623 int ret = 0;
f6677f43 1624
4c1b52bc 1625 j = 0;
a83d8e8d 1626 mtpar.net = net;
9b4fce7a
JE
1627 mtpar.table = name;
1628 mtpar.entryinfo = &e->ip;
1629 mtpar.hook_mask = e->comefrom;
916a917d 1630 mtpar.family = NFPROTO_IPV4;
dcea992a 1631 xt_ematch_foreach(ematch, e) {
6bdb331b 1632 ret = check_match(ematch, &mtpar);
dcea992a 1633 if (ret != 0)
6bdb331b
JE
1634 goto cleanup_matches;
1635 ++j;
dcea992a 1636 }
4c1b52bc 1637
add67461 1638 ret = check_target(e, net, name);
4c1b52bc
DM
1639 if (ret)
1640 goto cleanup_matches;
4c1b52bc
DM
1641 return 0;
1642
1643 cleanup_matches:
6bdb331b
JE
1644 xt_ematch_foreach(ematch, e) {
1645 if (j-- == 0)
dcea992a 1646 break;
6bdb331b
JE
1647 cleanup_match(ematch, net);
1648 }
4c1b52bc 1649 return ret;
f6677f43
DM
1650}
1651
1da177e4 1652static int
a83d8e8d
AD
1653translate_compat_table(struct net *net,
1654 const char *name,
4b478248
PM
1655 unsigned int valid_hooks,
1656 struct xt_table_info **pinfo,
1657 void **pentry0,
1658 unsigned int total_size,
1659 unsigned int number,
1660 unsigned int *hook_entries,
1661 unsigned int *underflows)
1da177e4 1662{
920b868a 1663 unsigned int i, j;
2722971c
DM
1664 struct xt_table_info *newinfo, *info;
1665 void *pos, *entry0, *entry1;
72b2b1dd
JE
1666 struct compat_ipt_entry *iter0;
1667 struct ipt_entry *iter1;
2722971c 1668 unsigned int size;
0559518b 1669 int ret;
1da177e4 1670
2722971c
DM
1671 info = *pinfo;
1672 entry0 = *pentry0;
1673 size = total_size;
1674 info->number = number;
1675
1676 /* Init all hooks to impossible value. */
6e23ae2a 1677 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
2722971c
DM
1678 info->hook_entry[i] = 0xFFFFFFFF;
1679 info->underflow[i] = 0xFFFFFFFF;
1680 }
1681
1682 duprintf("translate_compat_table: size %u\n", info->size);
920b868a 1683 j = 0;
2722971c 1684 xt_compat_lock(AF_INET);
255d0dc3 1685 xt_compat_init_offsets(AF_INET, number);
2722971c 1686 /* Walk through entries, checking offsets. */
72b2b1dd
JE
1687 xt_entry_foreach(iter0, entry0, total_size) {
1688 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
6b4ff2d7
JE
1689 entry0,
1690 entry0 + total_size,
1691 hook_entries,
1692 underflows,
1693 name);
72b2b1dd 1694 if (ret != 0)
0559518b
JE
1695 goto out_unlock;
1696 ++j;
72b2b1dd 1697 }
2722971c
DM
1698
1699 ret = -EINVAL;
920b868a 1700 if (j != number) {
2722971c 1701 duprintf("translate_compat_table: %u not %u entries\n",
920b868a 1702 j, number);
2722971c
DM
1703 goto out_unlock;
1704 }
1705
1706 /* Check hooks all assigned */
6e23ae2a 1707 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
2722971c
DM
1708 /* Only hooks which are valid */
1709 if (!(valid_hooks & (1 << i)))
1710 continue;
1711 if (info->hook_entry[i] == 0xFFFFFFFF) {
1712 duprintf("Invalid hook entry %u %u\n",
1713 i, hook_entries[i]);
1714 goto out_unlock;
1da177e4 1715 }
2722971c
DM
1716 if (info->underflow[i] == 0xFFFFFFFF) {
1717 duprintf("Invalid underflow %u %u\n",
1718 i, underflows[i]);
1719 goto out_unlock;
1720 }
1721 }
1722
1723 ret = -ENOMEM;
1724 newinfo = xt_alloc_table_info(size);
1725 if (!newinfo)
1726 goto out_unlock;
1727
1728 newinfo->number = number;
6e23ae2a 1729 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
2722971c
DM
1730 newinfo->hook_entry[i] = info->hook_entry[i];
1731 newinfo->underflow[i] = info->underflow[i];
1732 }
1733 entry1 = newinfo->entries[raw_smp_processor_id()];
1734 pos = entry1;
4b478248 1735 size = total_size;
72b2b1dd 1736 xt_entry_foreach(iter0, entry0, total_size) {
6b4ff2d7
JE
1737 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1738 name, newinfo, entry1);
72b2b1dd
JE
1739 if (ret != 0)
1740 break;
1741 }
b386d9f5 1742 xt_compat_flush_offsets(AF_INET);
2722971c
DM
1743 xt_compat_unlock(AF_INET);
1744 if (ret)
1745 goto free_newinfo;
1746
1747 ret = -ELOOP;
1748 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1749 goto free_newinfo;
1750
4c1b52bc 1751 i = 0;
72b2b1dd 1752 xt_entry_foreach(iter1, entry1, newinfo->size) {
0559518b 1753 ret = compat_check_entry(iter1, net, name);
72b2b1dd
JE
1754 if (ret != 0)
1755 break;
0559518b 1756 ++i;
cca77b7c
FW
1757 if (strcmp(ipt_get_target(iter1)->u.user.name,
1758 XT_ERROR_TARGET) == 0)
1759 ++newinfo->stacksize;
72b2b1dd 1760 }
4c1b52bc 1761 if (ret) {
72b2b1dd
JE
1762 /*
1763 * The first i matches need cleanup_entry (calls ->destroy)
1764 * because they had called ->check already. The other j-i
1765 * entries need only release.
1766 */
1767 int skip = i;
4c1b52bc 1768 j -= i;
72b2b1dd
JE
1769 xt_entry_foreach(iter0, entry0, newinfo->size) {
1770 if (skip-- > 0)
1771 continue;
0559518b 1772 if (j-- == 0)
72b2b1dd 1773 break;
0559518b 1774 compat_release_entry(iter0);
72b2b1dd 1775 }
0559518b
JE
1776 xt_entry_foreach(iter1, entry1, newinfo->size) {
1777 if (i-- == 0)
72b2b1dd 1778 break;
0559518b
JE
1779 cleanup_entry(iter1, net);
1780 }
4c1b52bc
DM
1781 xt_free_table_info(newinfo);
1782 return ret;
1783 }
f6677f43 1784
2722971c 1785 /* And one copy for every other CPU */
fb1bb34d 1786 for_each_possible_cpu(i)
2722971c
DM
1787 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1788 memcpy(newinfo->entries[i], entry1, newinfo->size);
1789
1790 *pinfo = newinfo;
1791 *pentry0 = entry1;
1792 xt_free_table_info(info);
1793 return 0;
1da177e4 1794
2722971c
DM
1795free_newinfo:
1796 xt_free_table_info(newinfo);
1797out:
0559518b
JE
1798 xt_entry_foreach(iter0, entry0, total_size) {
1799 if (j-- == 0)
72b2b1dd 1800 break;
0559518b
JE
1801 compat_release_entry(iter0);
1802 }
1da177e4 1803 return ret;
2722971c 1804out_unlock:
b386d9f5 1805 xt_compat_flush_offsets(AF_INET);
2722971c
DM
1806 xt_compat_unlock(AF_INET);
1807 goto out;
1da177e4
LT
1808}
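/*
 * Editor's note: translate_compat_table() above works in two passes:
 * pass one validates every 32-bit (compat) entry under xt_compat_lock()
 * and records, via the xt_compat_*() helpers, how much larger each
 * entry becomes in the native layout; pass two copies the entries into
 * a freshly allocated xt_table_info and adjusts jump targets by the
 * accumulated size delta.  The #if 0 sketch only illustrates that
 * offset-adjustment idea with a toy delta table; adjust_offset() and
 * deltas[] are invented for the example and are not kernel interfaces.
 */
#if 0
#include <stdio.h>

struct toy_delta { unsigned int offset; int delta; };

/* deltas recorded in "pass one": entry at 'offset' grows by 'delta' bytes */
static const struct toy_delta deltas[] = {
	{ 0,   8 },	/* first entry grows by 8 bytes  */
	{ 112, 8 },	/* second entry grows by 8 bytes */
};

/* "pass two": a compat jump target is shifted by every delta before it */
static unsigned int adjust_offset(unsigned int compat_target)
{
	unsigned int i, native_target = compat_target;

	for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++)
		if (deltas[i].offset < compat_target)
			native_target += deltas[i].delta;
	return native_target;
}

int main(void)
{
	printf("compat offset 224 -> native offset %u\n", adjust_offset(224));
	return 0;
}
#endif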
1809
1810static int
34bd137b 1811compat_do_replace(struct net *net, void __user *user, unsigned int len)
1da177e4
LT
1812{
1813 int ret;
2722971c
DM
1814 struct compat_ipt_replace tmp;
1815 struct xt_table_info *newinfo;
1816 void *loc_cpu_entry;
72b2b1dd 1817 struct ipt_entry *iter;
1da177e4
LT
1818
1819 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1820 return -EFAULT;
1821
ee4bb818 1822 /* overflow check */
259d4e41 1823 if (tmp.size >= INT_MAX / num_possible_cpus())
ee4bb818
KK
1824 return -ENOMEM;
1825 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1826 return -ENOMEM;
1827
2e4e6a17 1828 newinfo = xt_alloc_table_info(tmp.size);
1da177e4
LT
1829 if (!newinfo)
1830 return -ENOMEM;
1831
9c547959 1832 /* choose the copy that is on our node/cpu */
31836064
ED
1833 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1834 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1da177e4
LT
1835 tmp.size) != 0) {
1836 ret = -EFAULT;
1837 goto free_newinfo;
1838 }
1839
a83d8e8d 1840 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
4b478248
PM
1841 &newinfo, &loc_cpu_entry, tmp.size,
1842 tmp.num_entries, tmp.hook_entry,
1843 tmp.underflow);
2722971c 1844 if (ret != 0)
1da177e4 1845 goto free_newinfo;
1da177e4 1846
2722971c 1847 duprintf("compat_do_replace: Translated table\n");
1da177e4 1848
34bd137b 1849 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
4b478248 1850 tmp.num_counters, compat_ptr(tmp.counters));
2722971c
DM
1851 if (ret)
1852 goto free_newinfo_untrans;
1853 return 0;
1da177e4 1854
2722971c 1855 free_newinfo_untrans:
72b2b1dd 1856 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
0559518b 1857 cleanup_entry(iter, net);
2722971c
DM
1858 free_newinfo:
1859 xt_free_table_info(newinfo);
1860 return ret;
1861}
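/*
 * Editor's note: the "overflow check" near the top of compat_do_replace()
 * rejects oversized requests by comparing against INT_MAX divided by the
 * factor instead of multiplying, so the guard itself cannot overflow
 * before xt_alloc_table_info() scales the size by the number of possible
 * CPUs.  The #if 0 sketch is a generic, userspace illustration of that
 * idiom; would_overflow() is invented for the example.
 */
#if 0
#include <limits.h>
#include <stdio.h>

static int would_overflow(unsigned int size, unsigned int factor)
{
	/* true if size * factor would exceed INT_MAX */
	return factor != 0 && size >= (unsigned int)INT_MAX / factor;
}

int main(void)
{
	printf("%d\n", would_overflow(1u << 20, 8));	/* 0: fits           */
	printf("%d\n", would_overflow(1u << 30, 8));	/* 1: would overflow */
	return 0;
}
#endif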
1da177e4 1862
2722971c
DM
1863static int
1864compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
4b478248 1865 unsigned int len)
2722971c
DM
1866{
1867 int ret;
1da177e4 1868
2722971c
DM
1869 if (!capable(CAP_NET_ADMIN))
1870 return -EPERM;
1da177e4 1871
2722971c
DM
1872 switch (cmd) {
1873 case IPT_SO_SET_REPLACE:
3b1e0a65 1874 ret = compat_do_replace(sock_net(sk), user, len);
2722971c 1875 break;
1da177e4 1876
2722971c 1877 case IPT_SO_SET_ADD_COUNTERS:
3b1e0a65 1878 ret = do_add_counters(sock_net(sk), user, len, 1);
2722971c
DM
1879 break;
1880
1881 default:
1882 duprintf("compat_do_ipt_set_ctl: unknown request %i\n", cmd);
1883 ret = -EINVAL;
1884 }
1da177e4 1885
1da177e4
LT
1886 return ret;
1887}
1888
4b478248 1889struct compat_ipt_get_entries {
12b00c2c 1890 char name[XT_TABLE_MAXNAMELEN];
2722971c
DM
1891 compat_uint_t size;
1892 struct compat_ipt_entry entrytable[0];
1893};
1da177e4 1894
4b478248
PM
1895static int
1896compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1897 void __user *userptr)
2722971c 1898{
2722971c 1899 struct xt_counters *counters;
5452e425 1900 const struct xt_table_info *private = table->private;
2722971c
DM
1901 void __user *pos;
1902 unsigned int size;
1903 int ret = 0;
5452e425 1904 const void *loc_cpu_entry;
a18aa31b 1905 unsigned int i = 0;
72b2b1dd 1906 struct ipt_entry *iter;
1da177e4 1907
2722971c
DM
1908 counters = alloc_counters(table);
1909 if (IS_ERR(counters))
1910 return PTR_ERR(counters);
1911
1912 /* choose the copy that is on our node/cpu, ...
1913 * This choice is lazy (because the current thread is
1914 * allowed to migrate to another cpu)
1915 */
1916 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1917 pos = userptr;
1918 size = total_size;
72b2b1dd
JE
1919 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1920 ret = compat_copy_entry_to_user(iter, &pos,
6b4ff2d7 1921 &size, counters, i++);
72b2b1dd
JE
1922 if (ret != 0)
1923 break;
1924 }
2722971c 1925
2722971c
DM
1926 vfree(counters);
1927 return ret;
1da177e4
LT
1928}
1929
1930static int
34bd137b
AD
1931compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1932 int *len)
1da177e4 1933{
2722971c
DM
1934 int ret;
1935 struct compat_ipt_get_entries get;
e60a13e0 1936 struct xt_table *t;
1da177e4 1937
2722971c 1938 if (*len < sizeof(get)) {
c9d8fe13 1939 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1da177e4 1940 return -EINVAL;
2722971c 1941 }
1da177e4 1942
2722971c
DM
1943 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1944 return -EFAULT;
1da177e4 1945
2722971c 1946 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
c9d8fe13
PM
1947 duprintf("compat_get_entries: %u != %zu\n",
1948 *len, sizeof(get) + get.size);
2722971c 1949 return -EINVAL;
1da177e4
LT
1950 }
1951
2722971c 1952 xt_compat_lock(AF_INET);
34bd137b 1953 t = xt_find_table_lock(net, AF_INET, get.name);
2722971c 1954 if (t && !IS_ERR(t)) {
5452e425 1955 const struct xt_table_info *private = t->private;
2722971c 1956 struct xt_table_info info;
9c547959 1957 duprintf("t->private->number = %u\n", private->number);
2722971c
DM
1958 ret = compat_table_info(private, &info);
1959 if (!ret && get.size == info.size) {
1960 ret = compat_copy_entries_to_user(private->size,
4b478248 1961 t, uptr->entrytable);
2722971c
DM
1962 } else if (!ret) {
1963 duprintf("compat_get_entries: I've got %u not %u!\n",
9c547959 1964 private->size, get.size);
544473c1 1965 ret = -EAGAIN;
2722971c 1966 }
b386d9f5 1967 xt_compat_flush_offsets(AF_INET);
2722971c
DM
1968 module_put(t->me);
1969 xt_table_unlock(t);
1970 } else
1da177e4 1971 ret = t ? PTR_ERR(t) : -ENOENT;
1da177e4 1972
2722971c
DM
1973 xt_compat_unlock(AF_INET);
1974 return ret;
1975}
1da177e4 1976
79030ed0
PM
1977static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1978
2722971c
DM
1979static int
1980compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1981{
1982 int ret;
1da177e4 1983
82fac054
BS
1984 if (!capable(CAP_NET_ADMIN))
1985 return -EPERM;
1986
2722971c
DM
1987 switch (cmd) {
1988 case IPT_SO_GET_INFO:
3b1e0a65 1989 ret = get_info(sock_net(sk), user, len, 1);
2722971c
DM
1990 break;
1991 case IPT_SO_GET_ENTRIES:
3b1e0a65 1992 ret = compat_get_entries(sock_net(sk), user, len);
2722971c
DM
1993 break;
1994 default:
79030ed0 1995 ret = do_ipt_get_ctl(sk, cmd, user, len);
2722971c 1996 }
1da177e4
LT
1997 return ret;
1998}
2722971c 1999#endif
1da177e4
LT
2000
2001static int
9c547959 2002do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1da177e4
LT
2003{
2004 int ret;
2005
2006 if (!capable(CAP_NET_ADMIN))
2007 return -EPERM;
2008
2009 switch (cmd) {
2010 case IPT_SO_SET_REPLACE:
3b1e0a65 2011 ret = do_replace(sock_net(sk), user, len);
1da177e4
LT
2012 break;
2013
2014 case IPT_SO_SET_ADD_COUNTERS:
3b1e0a65 2015 ret = do_add_counters(sock_net(sk), user, len, 0);
1da177e4
LT
2016 break;
2017
2018 default:
2019 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2020 ret = -EINVAL;
2021 }
2022
2023 return ret;
2024}
2025
2026static int
2027do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2028{
2029 int ret;
2030
2031 if (!capable(CAP_NET_ADMIN))
2032 return -EPERM;
2033
2034 switch (cmd) {
2722971c 2035 case IPT_SO_GET_INFO:
3b1e0a65 2036 ret = get_info(sock_net(sk), user, len, 0);
2722971c 2037 break;
1da177e4 2038
2722971c 2039 case IPT_SO_GET_ENTRIES:
3b1e0a65 2040 ret = get_entries(sock_net(sk), user, len);
1da177e4 2041 break;
1da177e4
LT
2042
2043 case IPT_SO_GET_REVISION_MATCH:
2044 case IPT_SO_GET_REVISION_TARGET: {
12b00c2c 2045 struct xt_get_revision rev;
2e4e6a17 2046 int target;
1da177e4
LT
2047
2048 if (*len != sizeof(rev)) {
2049 ret = -EINVAL;
2050 break;
2051 }
2052 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2053 ret = -EFAULT;
2054 break;
2055 }
2056
2057 if (cmd == IPT_SO_GET_REVISION_TARGET)
2e4e6a17 2058 target = 1;
1da177e4 2059 else
2e4e6a17 2060 target = 0;
1da177e4 2061
2e4e6a17
HW
2062 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2063 rev.revision,
2064 target, &ret),
1da177e4
LT
2065 "ipt_%s", rev.name);
2066 break;
2067 }
2068
2069 default:
2070 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2071 ret = -EINVAL;
2072 }
2073
2074 return ret;
2075}
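/*
 * Editor's note: both the native and the compat control paths above are
 * reached from userspace through {get,set}sockopt() on an IPv4 socket,
 * which is why each handler begins with a capable(CAP_NET_ADMIN) check.
 * The #if 0 sketch below is a minimal userspace query of the "filter"
 * table via IPT_SO_GET_INFO, assuming the sanitized kernel header
 * linux/netfilter_ipv4/ip_tables.h is installed; real tools normally go
 * through libiptc rather than issuing the sockopt directly.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_tables.h>

int main(void)
{
	struct ipt_getinfo info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); /* needs CAP_NET_ADMIN */

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	strcpy(info.name, "filter");
	if (getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len) < 0)
		return 1;

	printf("table %s: %u entries, %u bytes, valid_hooks 0x%x\n",
	       info.name, info.num_entries, info.size, info.valid_hooks);
	return 0;
}
#endif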
2076
35aad0ff
JE
2077struct xt_table *ipt_register_table(struct net *net,
2078 const struct xt_table *table,
44d34e72 2079 const struct ipt_replace *repl)
1da177e4
LT
2080{
2081 int ret;
2e4e6a17 2082 struct xt_table_info *newinfo;
f3c5c1bf 2083 struct xt_table_info bootstrap = {0};
31836064 2084 void *loc_cpu_entry;
a98da11d 2085 struct xt_table *new_table;
1da177e4 2086
2e4e6a17 2087 newinfo = xt_alloc_table_info(repl->size);
44d34e72
AD
2088 if (!newinfo) {
2089 ret = -ENOMEM;
2090 goto out;
2091 }
1da177e4 2092
9c547959 2093 /* choose the copy on our node/cpu, but don't care about preemption */
31836064
ED
2094 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2095 memcpy(loc_cpu_entry, repl->entries, repl->size);
1da177e4 2096
0f234214 2097 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
44d34e72
AD
2098 if (ret != 0)
2099 goto out_free;
1da177e4 2100
44d34e72 2101 new_table = xt_register_table(net, table, &bootstrap, newinfo);
a98da11d 2102 if (IS_ERR(new_table)) {
44d34e72
AD
2103 ret = PTR_ERR(new_table);
2104 goto out_free;
1da177e4
LT
2105 }
2106
44d34e72
AD
2107 return new_table;
2108
2109out_free:
2110 xt_free_table_info(newinfo);
2111out:
2112 return ERR_PTR(ret);
1da177e4
LT
2113}
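/*
 * Editor's note: ipt_register_table() above is the entry point used by
 * the per-table modules (iptable_filter and friends): they build an
 * initial ipt_replace blob (typically via ipt_alloc_initial_table())
 * and hand it in together with their struct xt_table template.  The
 * #if 0 sketch shows the rough shape of such a caller for a
 * hypothetical "example" table; the field values and error handling are
 * abbreviated and should not be read as the exact code of any in-tree
 * table module.
 */
#if 0
static const struct xt_table example_table = {
	.name		= "example",
	.valid_hooks	= 1 << NF_INET_LOCAL_OUT,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV4,
};

static int __net_init example_net_init(struct net *net)
{
	struct ipt_replace *repl;
	struct xt_table *t;

	repl = ipt_alloc_initial_table(&example_table);
	if (repl == NULL)
		return -ENOMEM;
	t = ipt_register_table(net, &example_table, repl);
	kfree(repl);
	/* a real module stores t (e.g. in struct net) and later passes it
	 * to ipt_unregister_table() from its pernet exit hook */
	return IS_ERR(t) ? PTR_ERR(t) : 0;
}
#endif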
2114
f54e9367 2115void ipt_unregister_table(struct net *net, struct xt_table *table)
1da177e4 2116{
2e4e6a17 2117 struct xt_table_info *private;
31836064 2118 void *loc_cpu_entry;
df200969 2119 struct module *table_owner = table->me;
72b2b1dd 2120 struct ipt_entry *iter;
31836064 2121
e905a9ed 2122 private = xt_unregister_table(table);
1da177e4
LT
2123
2124 /* Decrease module usage counts and free resources */
2e4e6a17 2125 loc_cpu_entry = private->entries[raw_smp_processor_id()];
72b2b1dd 2126 xt_entry_foreach(iter, loc_cpu_entry, private->size)
0559518b 2127 cleanup_entry(iter, net);
df200969
AD
2128 if (private->number > private->initial_entries)
2129 module_put(table_owner);
2e4e6a17 2130 xt_free_table_info(private);
1da177e4
LT
2131}
2132
2133/* Returns 1 if the type and code is matched by the range, 0 otherwise */
1d93a9cb 2134static inline bool
1da177e4
LT
2135icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2136 u_int8_t type, u_int8_t code,
1d93a9cb 2137 bool invert)
1da177e4 2138{
9c547959
PM
2139 return ((test_type == 0xFF) ||
2140 (type == test_type && code >= min_code && code <= max_code))
1da177e4
LT
2141 ^ invert;
2142}
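/*
 * Editor's note: the "^ invert" at the end of icmp_type_code_match() is
 * the usual x_tables idiom for rule negation: compute the plain match
 * result, then XOR it with the (0 or 1) invert flag, avoiding a branch.
 * The #if 0 sketch demonstrates the idiom in isolation with an invented
 * in_range() helper.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool in_range(int v, int lo, int hi, bool invert)
{
	return ((v >= lo) && (v <= hi)) ^ invert;
}

int main(void)
{
	printf("%d %d\n", in_range(5, 0, 8, false),	/* 1: matches        */
			  in_range(5, 0, 8, true));	/* 0: match inverted */
	return 0;
}
#endif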
2143
1d93a9cb 2144static bool
62fc8051 2145icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
1da177e4 2146{
5452e425
JE
2147 const struct icmphdr *ic;
2148 struct icmphdr _icmph;
f7108a20 2149 const struct ipt_icmp *icmpinfo = par->matchinfo;
1da177e4
LT
2150
2151 /* Must not be a fragment. */
f7108a20 2152 if (par->fragoff != 0)
1d93a9cb 2153 return false;
1da177e4 2154
f7108a20 2155 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
1da177e4
LT
2156 if (ic == NULL) {
2157 /* We've been asked to examine this packet, and we
2158 * can't. Hence, no choice but to drop.
2159 */
2160 duprintf("Dropping evil ICMP tinygram.\n");
b4ba2611 2161 par->hotdrop = true;
1d93a9cb 2162 return false;
1da177e4
LT
2163 }
2164
2165 return icmp_type_code_match(icmpinfo->type,
2166 icmpinfo->code[0],
2167 icmpinfo->code[1],
2168 ic->type, ic->code,
2169 !!(icmpinfo->invflags&IPT_ICMP_INV));
2170}
2171
b0f38452 2172static int icmp_checkentry(const struct xt_mtchk_param *par)
1da177e4 2173{
9b4fce7a 2174 const struct ipt_icmp *icmpinfo = par->matchinfo;
1da177e4 2175
1d5cd909 2176 /* Must specify no unknown invflags */
bd414ee6 2177 return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
1da177e4
LT
2178}
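/*
 * Editor's note: icmp_checkentry() is the load-time validation hook for
 * the built-in "icmp" match; like many checkentry handlers it only has
 * to reject option bits it does not understand.  The #if 0 sketch shows
 * the same "mask off the known bits, fail on anything left" pattern
 * with invented flag names.
 */
#if 0
#include <errno.h>
#include <stdio.h>

#define EXAMPLE_FLAG_A	0x01
#define EXAMPLE_FLAG_B	0x02
#define EXAMPLE_KNOWN	(EXAMPLE_FLAG_A | EXAMPLE_FLAG_B)

static int example_check(unsigned int flags)
{
	return (flags & ~EXAMPLE_KNOWN) ? -EINVAL : 0;
}

int main(void)
{
	printf("%d %d\n", example_check(EXAMPLE_FLAG_A),	/* 0       */
			  example_check(0x10));			/* -EINVAL */
	return 0;
}
#endif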
2179
4538506b
JE
2180static struct xt_target ipt_builtin_tg[] __read_mostly = {
2181 {
243bf6e2 2182 .name = XT_STANDARD_TARGET,
4538506b
JE
2183 .targetsize = sizeof(int),
2184 .family = NFPROTO_IPV4,
2722971c 2185#ifdef CONFIG_COMPAT
4538506b
JE
2186 .compatsize = sizeof(compat_int_t),
2187 .compat_from_user = compat_standard_from_user,
2188 .compat_to_user = compat_standard_to_user,
2722971c 2189#endif
4538506b
JE
2190 },
2191 {
243bf6e2 2192 .name = XT_ERROR_TARGET,
4538506b 2193 .target = ipt_error,
12b00c2c 2194 .targetsize = XT_FUNCTION_MAXNAMELEN,
4538506b
JE
2195 .family = NFPROTO_IPV4,
2196 },
1da177e4
LT
2197};
2198
2199static struct nf_sockopt_ops ipt_sockopts = {
2200 .pf = PF_INET,
2201 .set_optmin = IPT_BASE_CTL,
2202 .set_optmax = IPT_SO_SET_MAX+1,
2203 .set = do_ipt_set_ctl,
2722971c
DM
2204#ifdef CONFIG_COMPAT
2205 .compat_set = compat_do_ipt_set_ctl,
2206#endif
1da177e4
LT
2207 .get_optmin = IPT_BASE_CTL,
2208 .get_optmax = IPT_SO_GET_MAX+1,
2209 .get = do_ipt_get_ctl,
2722971c
DM
2210#ifdef CONFIG_COMPAT
2211 .compat_get = compat_do_ipt_get_ctl,
2212#endif
16fcec35 2213 .owner = THIS_MODULE,
1da177e4
LT
2214};
2215
4538506b
JE
2216static struct xt_match ipt_builtin_mt[] __read_mostly = {
2217 {
2218 .name = "icmp",
2219 .match = icmp_match,
2220 .matchsize = sizeof(struct ipt_icmp),
2221 .checkentry = icmp_checkentry,
2222 .proto = IPPROTO_ICMP,
2223 .family = NFPROTO_IPV4,
2224 },
1da177e4
LT
2225};
2226
3cb609d5
AD
2227static int __net_init ip_tables_net_init(struct net *net)
2228{
383ca5b8 2229 return xt_proto_init(net, NFPROTO_IPV4);
3cb609d5
AD
2230}
2231
2232static void __net_exit ip_tables_net_exit(struct net *net)
2233{
383ca5b8 2234 xt_proto_fini(net, NFPROTO_IPV4);
3cb609d5
AD
2235}
2236
2237static struct pernet_operations ip_tables_net_ops = {
2238 .init = ip_tables_net_init,
2239 .exit = ip_tables_net_exit,
2240};
2241
65b4b4e8 2242static int __init ip_tables_init(void)
1da177e4
LT
2243{
2244 int ret;
2245
3cb609d5 2246 ret = register_pernet_subsys(&ip_tables_net_ops);
0eff66e6
PM
2247 if (ret < 0)
2248 goto err1;
2e4e6a17 2249
1da177e4 2250 /* No one else will be downing sem now, so we won't sleep */
4538506b 2251 ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
0eff66e6
PM
2252 if (ret < 0)
2253 goto err2;
4538506b 2254 ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
0eff66e6
PM
2255 if (ret < 0)
2256 goto err4;
1da177e4
LT
2257
2258 /* Register setsockopt */
2259 ret = nf_register_sockopt(&ipt_sockopts);
0eff66e6
PM
2260 if (ret < 0)
2261 goto err5;
1da177e4 2262
ff67e4e4 2263 pr_info("(C) 2000-2006 Netfilter Core Team\n");
1da177e4 2264 return 0;
0eff66e6
PM
2265
2266err5:
4538506b 2267 xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
0eff66e6 2268err4:
4538506b 2269 xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
0eff66e6 2270err2:
3cb609d5 2271 unregister_pernet_subsys(&ip_tables_net_ops);
0eff66e6
PM
2272err1:
2273 return ret;
1da177e4
LT
2274}
2275
65b4b4e8 2276static void __exit ip_tables_fini(void)
1da177e4
LT
2277{
2278 nf_unregister_sockopt(&ipt_sockopts);
2e4e6a17 2279
4538506b
JE
2280 xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
2281 xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
3cb609d5 2282 unregister_pernet_subsys(&ip_tables_net_ops);
1da177e4
LT
2283}
2284
2285EXPORT_SYMBOL(ipt_register_table);
2286EXPORT_SYMBOL(ipt_unregister_table);
1da177e4 2287EXPORT_SYMBOL(ipt_do_table);
65b4b4e8
AM
2288module_init(ip_tables_init);
2289module_exit(ip_tables_fini);