Commit | Line | Data |
---|---|---|
2e4e6a17 HW |
1 | /* |
2 | * x_tables core - Backend for {ip,ip6,arp}_tables | |
3 | * | |
4 | * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> | |
f229f6ce | 5 | * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net> |
2e4e6a17 HW |
6 | * |
7 | * Based on existing ip_tables code which is | |
8 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | |
9 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License version 2 as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | */ | |
be91fd5e | 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2e4e6a17 | 17 | #include <linux/kernel.h> |
3a9a231d | 18 | #include <linux/module.h> |
2e4e6a17 HW |
19 | #include <linux/socket.h> |
20 | #include <linux/net.h> | |
21 | #include <linux/proc_fs.h> | |
22 | #include <linux/seq_file.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/vmalloc.h> | |
9e19bb6d | 25 | #include <linux/mutex.h> |
d7fe0f24 | 26 | #include <linux/mm.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
fbabf31e | 28 | #include <linux/audit.h> |
f13f2aee | 29 | #include <linux/user_namespace.h> |
457c4cbc | 30 | #include <net/net_namespace.h> |
2e4e6a17 HW |
31 | |
32 | #include <linux/netfilter/x_tables.h> | |
33 | #include <linux/netfilter_arp.h> | |
e3eaa991 JE |
34 | #include <linux/netfilter_ipv4/ip_tables.h> |
35 | #include <linux/netfilter_ipv6/ip6_tables.h> | |
36 | #include <linux/netfilter_arp/arp_tables.h> | |
9e19bb6d | 37 | |
2e4e6a17 HW |
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

/* Round x up to the next multiple of the SMP cache line size. */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
43 | ||
/*
 * One entry of the kernel/32bit-userland translation table: maps a rule's
 * offset inside the kernel rule blob to the accumulated layout size
 * difference for all rules up to (and including) that one.
 */
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};
48 | ||
/*
 * Per address-family registry of match and target extensions.
 * The mutex protects both lists.
 */
struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

/* Array indexed by nfproto family (NFPROTO_*). */
static struct xt_af *xt;
62 | ||
/* Per-family prefix used for module autoloading ("%st_%s") and messages. */
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
70 | ||
2e4e6a17 | 71 | /* Registration hooks for targets. */ |
7926dbfa | 72 | int xt_register_target(struct xt_target *target) |
2e4e6a17 | 73 | { |
76108cea | 74 | u_int8_t af = target->family; |
2e4e6a17 | 75 | |
7926dbfa | 76 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 77 | list_add(&target->list, &xt[af].target); |
9e19bb6d | 78 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 79 | return 0; |
2e4e6a17 HW |
80 | } |
81 | EXPORT_SYMBOL(xt_register_target); | |
82 | ||
83 | void | |
a45049c5 | 84 | xt_unregister_target(struct xt_target *target) |
2e4e6a17 | 85 | { |
76108cea | 86 | u_int8_t af = target->family; |
a45049c5 | 87 | |
9e19bb6d | 88 | mutex_lock(&xt[af].mutex); |
df0933dc | 89 | list_del(&target->list); |
9e19bb6d | 90 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
91 | } |
92 | EXPORT_SYMBOL(xt_unregister_target); | |
93 | ||
52d9c42e PM |
94 | int |
95 | xt_register_targets(struct xt_target *target, unsigned int n) | |
96 | { | |
97 | unsigned int i; | |
98 | int err = 0; | |
99 | ||
100 | for (i = 0; i < n; i++) { | |
101 | err = xt_register_target(&target[i]); | |
102 | if (err) | |
103 | goto err; | |
104 | } | |
105 | return err; | |
106 | ||
107 | err: | |
108 | if (i > 0) | |
109 | xt_unregister_targets(target, i); | |
110 | return err; | |
111 | } | |
112 | EXPORT_SYMBOL(xt_register_targets); | |
113 | ||
114 | void | |
115 | xt_unregister_targets(struct xt_target *target, unsigned int n) | |
116 | { | |
f68c5301 CG |
117 | while (n-- > 0) |
118 | xt_unregister_target(&target[n]); | |
52d9c42e PM |
119 | } |
120 | EXPORT_SYMBOL(xt_unregister_targets); | |
121 | ||
7926dbfa | 122 | int xt_register_match(struct xt_match *match) |
2e4e6a17 | 123 | { |
76108cea | 124 | u_int8_t af = match->family; |
2e4e6a17 | 125 | |
7926dbfa | 126 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 127 | list_add(&match->list, &xt[af].match); |
9e19bb6d | 128 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 129 | return 0; |
2e4e6a17 HW |
130 | } |
131 | EXPORT_SYMBOL(xt_register_match); | |
132 | ||
133 | void | |
a45049c5 | 134 | xt_unregister_match(struct xt_match *match) |
2e4e6a17 | 135 | { |
76108cea | 136 | u_int8_t af = match->family; |
a45049c5 | 137 | |
9e19bb6d | 138 | mutex_lock(&xt[af].mutex); |
df0933dc | 139 | list_del(&match->list); |
9e19bb6d | 140 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
141 | } |
142 | EXPORT_SYMBOL(xt_unregister_match); | |
143 | ||
52d9c42e PM |
144 | int |
145 | xt_register_matches(struct xt_match *match, unsigned int n) | |
146 | { | |
147 | unsigned int i; | |
148 | int err = 0; | |
149 | ||
150 | for (i = 0; i < n; i++) { | |
151 | err = xt_register_match(&match[i]); | |
152 | if (err) | |
153 | goto err; | |
154 | } | |
155 | return err; | |
156 | ||
157 | err: | |
158 | if (i > 0) | |
159 | xt_unregister_matches(match, i); | |
160 | return err; | |
161 | } | |
162 | EXPORT_SYMBOL(xt_register_matches); | |
163 | ||
164 | void | |
165 | xt_unregister_matches(struct xt_match *match, unsigned int n) | |
166 | { | |
f68c5301 CG |
167 | while (n-- > 0) |
168 | xt_unregister_match(&match[n]); | |
52d9c42e PM |
169 | } |
170 | EXPORT_SYMBOL(xt_unregister_matches); | |
171 | ||
2e4e6a17 HW |
172 | |
173 | /* | |
174 | * These are weird, but module loading must not be done with mutex | |
175 | * held (since they will register), and we have to have a single | |
adb00ae2 | 176 | * function to use. |
2e4e6a17 HW |
177 | */ |
178 | ||
179 | /* Find match, grabs ref. Returns ERR_PTR() on error. */ | |
76108cea | 180 | struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
181 | { |
182 | struct xt_match *m; | |
42046e2e | 183 | int err = -ENOENT; |
2e4e6a17 | 184 | |
7926dbfa | 185 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
186 | list_for_each_entry(m, &xt[af].match, list) { |
187 | if (strcmp(m->name, name) == 0) { | |
188 | if (m->revision == revision) { | |
189 | if (try_module_get(m->me)) { | |
9e19bb6d | 190 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
191 | return m; |
192 | } | |
193 | } else | |
194 | err = -EPROTOTYPE; /* Found something. */ | |
195 | } | |
196 | } | |
9e19bb6d | 197 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
198 | |
199 | if (af != NFPROTO_UNSPEC) | |
200 | /* Try searching again in the family-independent list */ | |
201 | return xt_find_match(NFPROTO_UNSPEC, name, revision); | |
202 | ||
2e4e6a17 HW |
203 | return ERR_PTR(err); |
204 | } | |
205 | EXPORT_SYMBOL(xt_find_match); | |
206 | ||
fd0ec0e6 JE |
207 | struct xt_match * |
208 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | |
209 | { | |
210 | struct xt_match *match; | |
211 | ||
adb00ae2 SH |
212 | match = xt_find_match(nfproto, name, revision); |
213 | if (IS_ERR(match)) { | |
214 | request_module("%st_%s", xt_prefix[nfproto], name); | |
215 | match = xt_find_match(nfproto, name, revision); | |
216 | } | |
217 | ||
218 | return match; | |
fd0ec0e6 JE |
219 | } |
220 | EXPORT_SYMBOL_GPL(xt_request_find_match); | |
221 | ||
2e4e6a17 | 222 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ |
76108cea | 223 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
224 | { |
225 | struct xt_target *t; | |
42046e2e | 226 | int err = -ENOENT; |
2e4e6a17 | 227 | |
7926dbfa | 228 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
229 | list_for_each_entry(t, &xt[af].target, list) { |
230 | if (strcmp(t->name, name) == 0) { | |
231 | if (t->revision == revision) { | |
232 | if (try_module_get(t->me)) { | |
9e19bb6d | 233 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
234 | return t; |
235 | } | |
236 | } else | |
237 | err = -EPROTOTYPE; /* Found something. */ | |
238 | } | |
239 | } | |
9e19bb6d | 240 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
241 | |
242 | if (af != NFPROTO_UNSPEC) | |
243 | /* Try searching again in the family-independent list */ | |
244 | return xt_find_target(NFPROTO_UNSPEC, name, revision); | |
245 | ||
2e4e6a17 HW |
246 | return ERR_PTR(err); |
247 | } | |
248 | EXPORT_SYMBOL(xt_find_target); | |
249 | ||
76108cea | 250 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
251 | { |
252 | struct xt_target *target; | |
253 | ||
adb00ae2 SH |
254 | target = xt_find_target(af, name, revision); |
255 | if (IS_ERR(target)) { | |
256 | request_module("%st_%s", xt_prefix[af], name); | |
257 | target = xt_find_target(af, name, revision); | |
258 | } | |
259 | ||
260 | return target; | |
2e4e6a17 HW |
261 | } |
262 | EXPORT_SYMBOL_GPL(xt_request_find_target); | |
263 | ||
76108cea | 264 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 265 | { |
5452e425 | 266 | const struct xt_match *m; |
2e4e6a17 HW |
267 | int have_rev = 0; |
268 | ||
269 | list_for_each_entry(m, &xt[af].match, list) { | |
270 | if (strcmp(m->name, name) == 0) { | |
271 | if (m->revision > *bestp) | |
272 | *bestp = m->revision; | |
273 | if (m->revision == revision) | |
274 | have_rev = 1; | |
275 | } | |
276 | } | |
656caff2 PM |
277 | |
278 | if (af != NFPROTO_UNSPEC && !have_rev) | |
279 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
280 | ||
2e4e6a17 HW |
281 | return have_rev; |
282 | } | |
283 | ||
76108cea | 284 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 285 | { |
5452e425 | 286 | const struct xt_target *t; |
2e4e6a17 HW |
287 | int have_rev = 0; |
288 | ||
289 | list_for_each_entry(t, &xt[af].target, list) { | |
290 | if (strcmp(t->name, name) == 0) { | |
291 | if (t->revision > *bestp) | |
292 | *bestp = t->revision; | |
293 | if (t->revision == revision) | |
294 | have_rev = 1; | |
295 | } | |
296 | } | |
656caff2 PM |
297 | |
298 | if (af != NFPROTO_UNSPEC && !have_rev) | |
299 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
300 | ||
2e4e6a17 HW |
301 | return have_rev; |
302 | } | |
303 | ||
304 | /* Returns true or false (if no such extension at all) */ | |
76108cea | 305 | int xt_find_revision(u8 af, const char *name, u8 revision, int target, |
2e4e6a17 HW |
306 | int *err) |
307 | { | |
308 | int have_rev, best = -1; | |
309 | ||
7926dbfa | 310 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
311 | if (target == 1) |
312 | have_rev = target_revfn(af, name, revision, &best); | |
313 | else | |
314 | have_rev = match_revfn(af, name, revision, &best); | |
9e19bb6d | 315 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
316 | |
317 | /* Nothing at all? Return 0 to try loading module. */ | |
318 | if (best == -1) { | |
319 | *err = -ENOENT; | |
320 | return 0; | |
321 | } | |
322 | ||
323 | *err = best; | |
324 | if (!have_rev) | |
325 | *err = -EPROTONOSUPPORT; | |
326 | return 1; | |
327 | } | |
328 | EXPORT_SYMBOL_GPL(xt_find_revision); | |
329 | ||
5b76c494 JE |
330 | static char * |
331 | textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) | |
45185364 | 332 | { |
5b76c494 | 333 | static const char *const inetbr_names[] = { |
45185364 JE |
334 | "PREROUTING", "INPUT", "FORWARD", |
335 | "OUTPUT", "POSTROUTING", "BROUTING", | |
336 | }; | |
5b76c494 JE |
337 | static const char *const arp_names[] = { |
338 | "INPUT", "FORWARD", "OUTPUT", | |
339 | }; | |
340 | const char *const *names; | |
341 | unsigned int i, max; | |
45185364 JE |
342 | char *p = buf; |
343 | bool np = false; | |
344 | int res; | |
345 | ||
5b76c494 JE |
346 | names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; |
347 | max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) : | |
348 | ARRAY_SIZE(inetbr_names); | |
45185364 | 349 | *p = '\0'; |
5b76c494 | 350 | for (i = 0; i < max; ++i) { |
45185364 JE |
351 | if (!(mask & (1 << i))) |
352 | continue; | |
353 | res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); | |
354 | if (res > 0) { | |
355 | size -= res; | |
356 | p += res; | |
357 | } | |
358 | np = true; | |
359 | } | |
360 | ||
361 | return buf; | |
362 | } | |
363 | ||
/**
 * xt_check_match - centralized sanity checks for a match extension
 * @par:       match check parameters (family, table, hook mask, ...)
 * @size:      size of the match data supplied by userspace
 * @proto:     protocol the rule restricts itself to (0 = any)
 * @inv_proto: true if the rule's protocol match is inverted
 *
 * Validates the size, table, hook and protocol constraints declared by
 * the match, then runs the match's own checkentry callback (if any).
 * Returns 0 if the match may be used, negative errno otherwise.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
418 | ||
/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		/* each match must start at the required alignment ... */
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		/* ... leave room for the base header ... */
		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		/* ... claim at least the base header size ... */
		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		/* ... and not extend past the target. */
		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
459 | ||
2722971c | 460 | #ifdef CONFIG_COMPAT |
255d0dc3 | 461 | int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) |
b386d9f5 | 462 | { |
255d0dc3 | 463 | struct xt_af *xp = &xt[af]; |
b386d9f5 | 464 | |
255d0dc3 ED |
465 | if (!xp->compat_tab) { |
466 | if (!xp->number) | |
467 | return -EINVAL; | |
468 | xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); | |
469 | if (!xp->compat_tab) | |
470 | return -ENOMEM; | |
471 | xp->cur = 0; | |
472 | } | |
b386d9f5 | 473 | |
255d0dc3 ED |
474 | if (xp->cur >= xp->number) |
475 | return -EINVAL; | |
b386d9f5 | 476 | |
255d0dc3 ED |
477 | if (xp->cur) |
478 | delta += xp->compat_tab[xp->cur - 1].delta; | |
479 | xp->compat_tab[xp->cur].offset = offset; | |
480 | xp->compat_tab[xp->cur].delta = delta; | |
481 | xp->cur++; | |
b386d9f5 PM |
482 | return 0; |
483 | } | |
484 | EXPORT_SYMBOL_GPL(xt_compat_add_offset); | |
485 | ||
76108cea | 486 | void xt_compat_flush_offsets(u_int8_t af) |
b386d9f5 | 487 | { |
255d0dc3 ED |
488 | if (xt[af].compat_tab) { |
489 | vfree(xt[af].compat_tab); | |
490 | xt[af].compat_tab = NULL; | |
491 | xt[af].number = 0; | |
5a6351ee | 492 | xt[af].cur = 0; |
b386d9f5 PM |
493 | } |
494 | } | |
495 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | |
496 | ||
/*
 * Look up the accumulated size delta for the rule at @offset via binary
 * search of the (sorted) compat_tab.  A slot's delta already includes
 * the rule it belongs to, so on an exact hit the predecessor's value is
 * returned; otherwise the delta of the last rule before @offset applies.
 */
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
514 | ||
/*
 * Announce how many rules (= compat_tab slots) will follow via
 * xt_compat_add_offset(); the table itself is allocated lazily there.
 */
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
521 | ||
/* Size difference between the kernel and the 32bit compat match layout. */
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);
528 | ||
/*
 * Convert one compat (32bit layout) match into native layout at *dstptr,
 * advancing *dstptr and growing *size by the layout delta.  Drops the
 * module reference taken when the match was looked up.
 */
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	/* Zero the alignment padding so no kernel memory leaks out. */
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	/* Copy the name via a stack buffer: after module_put() the
	 * match structure (and its name) may no longer be valid.
	 */
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
558 | ||
/*
 * Copy one native-layout match out to userspace in compat (32bit)
 * layout at *dstptr, advancing *dstptr and shrinking *size by the
 * layout delta.  Returns 0 or -EFAULT.
 */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
fc1221b3 | 586 | |
/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};
592 | ||
/**
 * xt_compat_check_entry_offsets - CONFIG_COMPAT variant of
 * xt_check_entry_offsets(): validates target_offset/next_offset and the
 * match chain of an entry that uses the 32bit userland layout.
 * See xt_check_entry_offsets() for the full contract.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* Standard targets carry only a verdict; their size is fixed. */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict aligment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
9fa492cd | 628 | #endif /* CONFIG_COMPAT */ |
2722971c | 629 | |
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* Standard targets carry only a verdict; their size is fixed. */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
704 | ||
f4dc7771 FW |
705 | /** |
706 | * xt_alloc_entry_offsets - allocate array to store rule head offsets | |
707 | * | |
708 | * @size: number of entries | |
709 | * | |
710 | * Return: NULL or kmalloc'd or vmalloc'd array | |
711 | */ | |
712 | unsigned int *xt_alloc_entry_offsets(unsigned int size) | |
713 | { | |
714 | unsigned int *off; | |
715 | ||
716 | off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); | |
717 | ||
718 | if (off) | |
719 | return off; | |
720 | ||
721 | if (size < (SIZE_MAX / sizeof(unsigned int))) | |
722 | off = vmalloc(size * sizeof(unsigned int)); | |
723 | ||
724 | return off; | |
725 | } | |
726 | EXPORT_SYMBOL(xt_alloc_entry_offsets); | |
727 | ||
728 | /** | |
729 | * xt_find_jump_offset - check if target is a valid jump offset | |
730 | * | |
731 | * @offsets: array containing all valid rule start offsets of a rule blob | |
732 | * @target: the jump target to search for | |
733 | * @size: entries in @offset | |
734 | */ | |
735 | bool xt_find_jump_offset(const unsigned int *offsets, | |
736 | unsigned int target, unsigned int size) | |
737 | { | |
738 | int m, low = 0, hi = size; | |
739 | ||
740 | while (hi > low) { | |
741 | m = (low + hi) / 2u; | |
742 | ||
743 | if (offsets[m] > target) | |
744 | hi = m; | |
745 | else if (offsets[m] < target) | |
746 | low = m + 1; | |
747 | else | |
748 | return true; | |
749 | } | |
750 | ||
751 | return false; | |
752 | } | |
753 | EXPORT_SYMBOL(xt_find_jump_offset); | |
754 | ||
/**
 * xt_check_target - centralized sanity checks for a target extension
 * @par:       target check parameters (family, table, hook mask, ...)
 * @size:      size of the target data supplied by userspace
 * @proto:     protocol the rule restricts itself to (0 = any)
 * @inv_proto: true if the rule's protocol match is inverted
 *
 * Validates the size, table, hook and protocol constraints declared by
 * the target, then runs the target's own checkentry callback (if any).
 * Returns 0 if the target may be used, negative errno otherwise.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
804 | ||
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		/* name from userspace may be unterminated */
		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	/* reject a num_counters that does not match the remaining length;
	 * the u64 product cannot overflow for a valid len.
	 */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
878 | ||
2722971c | 879 | #ifdef CONFIG_COMPAT |
5452e425 | 880 | int xt_compat_target_offset(const struct xt_target *target) |
2722971c | 881 | { |
9fa492cd PM |
882 | u_int16_t csize = target->compatsize ? : target->targetsize; |
883 | return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); | |
884 | } | |
885 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); | |
886 | ||
/* Translate a 32bit-userspace target entry into the native layout at
 * *dstptr, advancing *dstptr and growing *size by the compat offset.
 * NOTE: on entry @t aliases the compat source; all source reads must go
 * through @ct because @t is repointed at the destination below.
 */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;	/* from here on @t is the destination entry */
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* Zero the alignment padding behind the translated payload. */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	/* Snapshot the name before module_put() so we never read
	 * target->name after dropping our reference on the module.
	 */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
916 | ||
739674fb JE |
917 | int xt_compat_target_to_user(const struct xt_entry_target *t, |
918 | void __user **dstptr, unsigned int *size) | |
9fa492cd | 919 | { |
5452e425 | 920 | const struct xt_target *target = t->u.kernel.target; |
9fa492cd PM |
921 | struct compat_xt_entry_target __user *ct = *dstptr; |
922 | int off = xt_compat_target_offset(target); | |
923 | u_int16_t tsize = t->u.user.target_size - off; | |
924 | ||
925 | if (copy_to_user(ct, t, sizeof(*ct)) || | |
a18aa31b PM |
926 | put_user(tsize, &ct->u.user.target_size) || |
927 | copy_to_user(ct->u.user.name, t->u.kernel.target->name, | |
928 | strlen(t->u.kernel.target->name) + 1)) | |
601e68e1 | 929 | return -EFAULT; |
9fa492cd PM |
930 | |
931 | if (target->compat_to_user) { | |
932 | if (target->compat_to_user((void __user *)ct->data, t->data)) | |
933 | return -EFAULT; | |
934 | } else { | |
935 | if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct))) | |
936 | return -EFAULT; | |
2722971c | 937 | } |
9fa492cd PM |
938 | |
939 | *size -= off; | |
940 | *dstptr += tsize; | |
941 | return 0; | |
2722971c | 942 | } |
9fa492cd | 943 | EXPORT_SYMBOL_GPL(xt_compat_target_to_user); |
2722971c DM |
944 | #endif |
945 | ||
2e4e6a17 HW |
946 | struct xt_table_info *xt_alloc_table_info(unsigned int size) |
947 | { | |
711bdde6 ED |
948 | struct xt_table_info *info = NULL; |
949 | size_t sz = sizeof(*info) + size; | |
2e4e6a17 | 950 | |
d157bd76 FW |
951 | if (sz < sizeof(*info)) |
952 | return NULL; | |
953 | ||
2e4e6a17 | 954 | /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ |
4481374c | 955 | if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) |
2e4e6a17 HW |
956 | return NULL; |
957 | ||
711bdde6 ED |
958 | if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) |
959 | info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); | |
960 | if (!info) { | |
961 | info = vmalloc(sz); | |
962 | if (!info) | |
963 | return NULL; | |
2e4e6a17 | 964 | } |
711bdde6 ED |
965 | memset(info, 0, sizeof(*info)); |
966 | info->size = size; | |
967 | return info; | |
2e4e6a17 HW |
968 | } |
969 | EXPORT_SYMBOL(xt_alloc_table_info); | |
970 | ||
971 | void xt_free_table_info(struct xt_table_info *info) | |
972 | { | |
973 | int cpu; | |
974 | ||
f3c5c1bf | 975 | if (info->jumpstack != NULL) { |
f6b50824 ED |
976 | for_each_possible_cpu(cpu) |
977 | kvfree(info->jumpstack[cpu]); | |
978 | kvfree(info->jumpstack); | |
f3c5c1bf JE |
979 | } |
980 | ||
711bdde6 | 981 | kvfree(info); |
2e4e6a17 HW |
982 | } |
983 | EXPORT_SYMBOL(xt_free_table_info); | |
984 | ||
985 | /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ | |
76108cea JE |
986 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
987 | const char *name) | |
2e4e6a17 | 988 | { |
b9e69e12 | 989 | struct xt_table *t, *found = NULL; |
2e4e6a17 | 990 | |
7926dbfa | 991 | mutex_lock(&xt[af].mutex); |
8d870052 | 992 | list_for_each_entry(t, &net->xt.tables[af], list) |
2e4e6a17 HW |
993 | if (strcmp(t->name, name) == 0 && try_module_get(t->me)) |
994 | return t; | |
b9e69e12 FW |
995 | |
996 | if (net == &init_net) | |
997 | goto out; | |
998 | ||
999 | /* Table doesn't exist in this netns, re-try init */ | |
1000 | list_for_each_entry(t, &init_net.xt.tables[af], list) { | |
1001 | if (strcmp(t->name, name)) | |
1002 | continue; | |
1003 | if (!try_module_get(t->me)) | |
1004 | return NULL; | |
1005 | ||
1006 | mutex_unlock(&xt[af].mutex); | |
1007 | if (t->table_init(net) != 0) { | |
1008 | module_put(t->me); | |
1009 | return NULL; | |
1010 | } | |
1011 | ||
1012 | found = t; | |
1013 | ||
1014 | mutex_lock(&xt[af].mutex); | |
1015 | break; | |
1016 | } | |
1017 | ||
1018 | if (!found) | |
1019 | goto out; | |
1020 | ||
1021 | /* and once again: */ | |
1022 | list_for_each_entry(t, &net->xt.tables[af], list) | |
1023 | if (strcmp(t->name, name) == 0) | |
1024 | return t; | |
1025 | ||
1026 | module_put(found->me); | |
1027 | out: | |
9e19bb6d | 1028 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
1029 | return NULL; |
1030 | } | |
1031 | EXPORT_SYMBOL_GPL(xt_find_table_lock); | |
1032 | ||
1033 | void xt_table_unlock(struct xt_table *table) | |
1034 | { | |
9e19bb6d | 1035 | mutex_unlock(&xt[table->af].mutex); |
2e4e6a17 HW |
1036 | } |
1037 | EXPORT_SYMBOL_GPL(xt_table_unlock); | |
1038 | ||
#ifdef CONFIG_COMPAT
/* Serialize 32bit-compat translation state for address family @af. */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

/* Release the compat mutex taken by xt_compat_lock(). */
void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
2e4e6a17 | 1052 | |
7f5c6d4f ED |
/* Per-cpu sequence counter bumped around ruleset traversal/update;
 * NOTE(review): the reader side lives in the per-family table code
 * (counter harvesting) — confirm in {ip,ip6,arp}_tables.
 */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* Static key, presumably flipped on while a TEE target is in use so the
 * fast path can skip TEE-related work otherwise — confirm at the users
 * (see the jumpstack reentrancy comment in xt_jumpstack_alloc()).
 */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1058 | ||
f3c5c1bf JE |
1059 | static int xt_jumpstack_alloc(struct xt_table_info *i) |
1060 | { | |
1061 | unsigned int size; | |
1062 | int cpu; | |
1063 | ||
f3c5c1bf JE |
1064 | size = sizeof(void **) * nr_cpu_ids; |
1065 | if (size > PAGE_SIZE) | |
3dbd4439 | 1066 | i->jumpstack = vzalloc(size); |
f3c5c1bf | 1067 | else |
3dbd4439 | 1068 | i->jumpstack = kzalloc(size, GFP_KERNEL); |
f3c5c1bf JE |
1069 | if (i->jumpstack == NULL) |
1070 | return -ENOMEM; | |
f3c5c1bf | 1071 | |
98d1bd80 FW |
1072 | /* ruleset without jumps -- no stack needed */ |
1073 | if (i->stacksize == 0) | |
1074 | return 0; | |
1075 | ||
7814b6ec FW |
1076 | /* Jumpstack needs to be able to record two full callchains, one |
1077 | * from the first rule set traversal, plus one table reentrancy | |
1078 | * via -j TEE without clobbering the callchain that brought us to | |
1079 | * TEE target. | |
1080 | * | |
1081 | * This is done by allocating two jumpstacks per cpu, on reentry | |
1082 | * the upper half of the stack is used. | |
1083 | * | |
1084 | * see the jumpstack setup in ipt_do_table() for more details. | |
1085 | */ | |
1086 | size = sizeof(void *) * i->stacksize * 2u; | |
f3c5c1bf JE |
1087 | for_each_possible_cpu(cpu) { |
1088 | if (size > PAGE_SIZE) | |
1089 | i->jumpstack[cpu] = vmalloc_node(size, | |
1090 | cpu_to_node(cpu)); | |
1091 | else | |
1092 | i->jumpstack[cpu] = kmalloc_node(size, | |
1093 | GFP_KERNEL, cpu_to_node(cpu)); | |
1094 | if (i->jumpstack[cpu] == NULL) | |
1095 | /* | |
1096 | * Freeing will be done later on by the callers. The | |
1097 | * chain is: xt_replace_table -> __do_replace -> | |
1098 | * do_replace -> xt_free_table_info. | |
1099 | */ | |
1100 | return -ENOMEM; | |
1101 | } | |
1102 | ||
1103 | return 0; | |
1104 | } | |
942e4a2b | 1105 | |
2e4e6a17 HW |
/**
 * xt_replace_table - swap in a new ruleset for @table
 * @table: table whose private ruleset is replaced
 * @num_counters: entry count userspace believes the table has; must
 *	match the live ruleset or the swap is refused with -EAGAIN
 * @newinfo: the replacement ruleset
 * @error: out parameter for the error code on failure
 *
 * Returns the old ruleset (caller harvests counters and frees it) or
 * NULL with *error set.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	/* Record the ruleset replacement for the security audit log. */
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1168 | ||
35aad0ff JE |
/**
 * xt_register_table - register a table in a network namespace
 * @net: namespace the table belongs to
 * @input_table: template table; duplicated so a single object never
 *	ends up on several per-netns lists
 * @bootstrap: placeholder ruleset, immediately replaced by @newinfo
 * @newinfo: the initial ruleset
 *
 * Returns the duplicated table on success, ERR_PTR() on failure
 * (-EEXIST if a table of the same name is already registered).
 */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
1217 | ||
1218 | void *xt_unregister_table(struct xt_table *table) | |
1219 | { | |
1220 | struct xt_table_info *private; | |
1221 | ||
9e19bb6d | 1222 | mutex_lock(&xt[table->af].mutex); |
2e4e6a17 | 1223 | private = table->private; |
df0933dc | 1224 | list_del(&table->list); |
9e19bb6d | 1225 | mutex_unlock(&xt[table->af].mutex); |
44d34e72 | 1226 | kfree(table); |
2e4e6a17 HW |
1227 | |
1228 | return private; | |
1229 | } | |
1230 | EXPORT_SYMBOL_GPL(xt_unregister_table); | |
1231 | ||
1232 | #ifdef CONFIG_PROC_FS | |
715cf35a AD |
/* Private state of the table-names seq_file: the generic per-netns
 * part plus the address family whose tables are being listed.
 */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
025d93d1 | 1237 | static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos) |
2e4e6a17 | 1238 | { |
715cf35a | 1239 | struct xt_names_priv *priv = seq->private; |
1218854a | 1240 | struct net *net = seq_file_net(seq); |
76108cea | 1241 | u_int8_t af = priv->af; |
2e4e6a17 | 1242 | |
025d93d1 | 1243 | mutex_lock(&xt[af].mutex); |
715cf35a | 1244 | return seq_list_start(&net->xt.tables[af], *pos); |
025d93d1 | 1245 | } |
2e4e6a17 | 1246 | |
025d93d1 AD |
1247 | static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
1248 | { | |
715cf35a | 1249 | struct xt_names_priv *priv = seq->private; |
1218854a | 1250 | struct net *net = seq_file_net(seq); |
76108cea | 1251 | u_int8_t af = priv->af; |
2e4e6a17 | 1252 | |
715cf35a | 1253 | return seq_list_next(v, &net->xt.tables[af], pos); |
2e4e6a17 HW |
1254 | } |
1255 | ||
025d93d1 | 1256 | static void xt_table_seq_stop(struct seq_file *seq, void *v) |
2e4e6a17 | 1257 | { |
715cf35a | 1258 | struct xt_names_priv *priv = seq->private; |
76108cea | 1259 | u_int8_t af = priv->af; |
2e4e6a17 | 1260 | |
025d93d1 AD |
1261 | mutex_unlock(&xt[af].mutex); |
1262 | } | |
2e4e6a17 | 1263 | |
025d93d1 AD |
1264 | static int xt_table_seq_show(struct seq_file *seq, void *v) |
1265 | { | |
1266 | struct xt_table *table = list_entry(v, struct xt_table, list); | |
2e4e6a17 | 1267 | |
861fb107 | 1268 | if (*table->name) |
e71456ae | 1269 | seq_printf(seq, "%s\n", table->name); |
861fb107 | 1270 | return 0; |
025d93d1 | 1271 | } |
601e68e1 | 1272 | |
025d93d1 AD |
/* Iterator for the /proc <prefix>_tables_names listing. */
static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};
1279 | ||
1280 | static int xt_table_open(struct inode *inode, struct file *file) | |
1281 | { | |
1282 | int ret; | |
715cf35a | 1283 | struct xt_names_priv *priv; |
025d93d1 | 1284 | |
715cf35a AD |
1285 | ret = seq_open_net(inode, file, &xt_table_seq_ops, |
1286 | sizeof(struct xt_names_priv)); | |
025d93d1 | 1287 | if (!ret) { |
715cf35a | 1288 | priv = ((struct seq_file *)file->private_data)->private; |
d9dda78b | 1289 | priv->af = (unsigned long)PDE_DATA(inode); |
025d93d1 AD |
1290 | } |
1291 | return ret; | |
2e4e6a17 HW |
1292 | } |
1293 | ||
025d93d1 AD |
/* proc file hooks for the table-names file; reading is delegated to
 * the seq_file machinery above.
 */
static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
1301 | ||
eb132205 JE |
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;	/* list being walked + cursor */
	uint8_t class, nfproto;		/* MTTG_TRAV_* phase + family */
};

/* Traversal phases: the NFPROTO_UNSPEC list first, then the
 * family-specific one; each phase holds that family's mutex.
 */
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};
1317 | ||
/* Advance the match/target traversal by one element, walking the
 * NFPROTO_UNSPEC list first and then the family-specific list, and
 * swapping the corresponding per-family mutexes at the transition.
 * Returns @trav while elements remain, NULL once done (the mutex still
 * held is released by xt_mttg_seq_stop()).
 */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* UNSPEC list exhausted: switch lock and list. */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}
601e68e1 | 1357 | |
eb132205 JE |
1358 | static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, |
1359 | bool is_target) | |
025d93d1 | 1360 | { |
eb132205 JE |
1361 | struct nf_mttg_trav *trav = seq->private; |
1362 | unsigned int j; | |
2e4e6a17 | 1363 | |
eb132205 JE |
1364 | trav->class = MTTG_TRAV_INIT; |
1365 | for (j = 0; j < *pos; ++j) | |
1366 | if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) | |
1367 | return NULL; | |
1368 | return trav; | |
2e4e6a17 HW |
1369 | } |
1370 | ||
eb132205 | 1371 | static void xt_mttg_seq_stop(struct seq_file *seq, void *v) |
2e4e6a17 | 1372 | { |
eb132205 JE |
1373 | struct nf_mttg_trav *trav = seq->private; |
1374 | ||
1375 | switch (trav->class) { | |
1376 | case MTTG_TRAV_NFP_UNSPEC: | |
1377 | mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); | |
1378 | break; | |
1379 | case MTTG_TRAV_NFP_SPEC: | |
1380 | mutex_unlock(&xt[trav->nfproto].mutex); | |
1381 | break; | |
1382 | } | |
1383 | } | |
2e4e6a17 | 1384 | |
eb132205 JE |
/* seq_file adaptors for the match listing (is_target == false). */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}
2e4e6a17 | 1394 | |
eb132205 JE |
1395 | static int xt_match_seq_show(struct seq_file *seq, void *v) |
1396 | { | |
1397 | const struct nf_mttg_trav *trav = seq->private; | |
1398 | const struct xt_match *match; | |
1399 | ||
1400 | switch (trav->class) { | |
1401 | case MTTG_TRAV_NFP_UNSPEC: | |
1402 | case MTTG_TRAV_NFP_SPEC: | |
1403 | if (trav->curr == trav->head) | |
1404 | return 0; | |
1405 | match = list_entry(trav->curr, struct xt_match, list); | |
861fb107 JP |
1406 | if (*match->name) |
1407 | seq_printf(seq, "%s\n", match->name); | |
eb132205 JE |
1408 | } |
1409 | return 0; | |
2e4e6a17 HW |
1410 | } |
1411 | ||
025d93d1 AD |
/* Iterator for the /proc <prefix>_tables_matches listing. */
static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};
1418 | ||
025d93d1 | 1419 | static int xt_match_open(struct inode *inode, struct file *file) |
2e4e6a17 | 1420 | { |
eb132205 | 1421 | struct nf_mttg_trav *trav; |
772476df RJ |
1422 | trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav)); |
1423 | if (!trav) | |
eb132205 | 1424 | return -ENOMEM; |
2e4e6a17 | 1425 | |
d9dda78b | 1426 | trav->nfproto = (unsigned long)PDE_DATA(inode); |
eb132205 | 1427 | return 0; |
025d93d1 AD |
1428 | } |
1429 | ||
/* proc file hooks for the matches file; seq_file does the reading. */
static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
2e4e6a17 | 1437 | |
025d93d1 AD |
/* seq_file adaptors for the target listing (is_target == true). */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}
1447 | ||
1448 | static int xt_target_seq_show(struct seq_file *seq, void *v) | |
1449 | { | |
eb132205 JE |
1450 | const struct nf_mttg_trav *trav = seq->private; |
1451 | const struct xt_target *target; | |
1452 | ||
1453 | switch (trav->class) { | |
1454 | case MTTG_TRAV_NFP_UNSPEC: | |
1455 | case MTTG_TRAV_NFP_SPEC: | |
1456 | if (trav->curr == trav->head) | |
1457 | return 0; | |
1458 | target = list_entry(trav->curr, struct xt_target, list); | |
861fb107 JP |
1459 | if (*target->name) |
1460 | seq_printf(seq, "%s\n", target->name); | |
eb132205 JE |
1461 | } |
1462 | return 0; | |
025d93d1 AD |
1463 | } |
1464 | ||
/* Iterator for the /proc <prefix>_tables_targets listing. */
static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};
1471 | ||
1472 | static int xt_target_open(struct inode *inode, struct file *file) | |
1473 | { | |
eb132205 | 1474 | struct nf_mttg_trav *trav; |
772476df RJ |
1475 | trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav)); |
1476 | if (!trav) | |
eb132205 | 1477 | return -ENOMEM; |
025d93d1 | 1478 | |
d9dda78b | 1479 | trav->nfproto = (unsigned long)PDE_DATA(inode); |
eb132205 | 1480 | return 0; |
2e4e6a17 HW |
1481 | } |
1482 | ||
/* proc file hooks for the targets file; seq_file does the reading. */
static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1490 | ||
/* Suffixes appended to xt_prefix[af] to build the three proc names. */
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"

#endif /* CONFIG_PROC_FS */
1496 | ||
2b95efe7 | 1497 | /** |
b9e69e12 | 1498 | * xt_hook_ops_alloc - set up hooks for a new table |
2b95efe7 JE |
1499 | * @table: table with metadata needed to set up hooks |
1500 | * @fn: Hook function | |
1501 | * | |
b9e69e12 FW |
1502 | * This function will create the nf_hook_ops that the x_table needs |
1503 | * to hand to xt_hook_link_net(). | |
2b95efe7 | 1504 | */ |
b9e69e12 FW |
1505 | struct nf_hook_ops * |
1506 | xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) | |
2b95efe7 JE |
1507 | { |
1508 | unsigned int hook_mask = table->valid_hooks; | |
1509 | uint8_t i, num_hooks = hweight32(hook_mask); | |
1510 | uint8_t hooknum; | |
1511 | struct nf_hook_ops *ops; | |
2b95efe7 | 1512 | |
a6d0bae1 XL |
1513 | if (!num_hooks) |
1514 | return ERR_PTR(-EINVAL); | |
1515 | ||
2b95efe7 JE |
1516 | ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); |
1517 | if (ops == NULL) | |
1518 | return ERR_PTR(-ENOMEM); | |
1519 | ||
1520 | for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; | |
1521 | hook_mask >>= 1, ++hooknum) { | |
1522 | if (!(hook_mask & 1)) | |
1523 | continue; | |
1524 | ops[i].hook = fn; | |
2b95efe7 JE |
1525 | ops[i].pf = table->af; |
1526 | ops[i].hooknum = hooknum; | |
1527 | ops[i].priority = table->priority; | |
1528 | ++i; | |
1529 | } | |
1530 | ||
2b95efe7 JE |
1531 | return ops; |
1532 | } | |
b9e69e12 | 1533 | EXPORT_SYMBOL_GPL(xt_hook_ops_alloc); |
2b95efe7 | 1534 | |
/**
 * xt_proto_init - create the per-family /proc/net listing files
 * @net: namespace to create the entries in
 * @af: address family, index into xt_prefix[]
 *
 * Creates <prefix>_tables_names, _tables_matches and _tables_targets,
 * chowned to the namespace's root user/group when those ids map.
 * Returns 0 on success, a negative value on error.
 */
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
	/* Unwind in reverse creation order. */
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
1597 | ||
76108cea | 1598 | void xt_proto_fini(struct net *net, u_int8_t af) |
2e4e6a17 HW |
1599 | { |
1600 | #ifdef CONFIG_PROC_FS | |
1601 | char buf[XT_FUNCTION_MAXNAMELEN]; | |
1602 | ||
ce18afe5 | 1603 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1604 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
ece31ffd | 1605 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1606 | |
ce18afe5 | 1607 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1608 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
ece31ffd | 1609 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1610 | |
ce18afe5 | 1611 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1612 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
ece31ffd | 1613 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1614 | #endif /*CONFIG_PROC_FS*/ |
1615 | } | |
1616 | EXPORT_SYMBOL_GPL(xt_proto_fini); | |
1617 | ||
8d870052 AD |
1618 | static int __net_init xt_net_init(struct net *net) |
1619 | { | |
1620 | int i; | |
1621 | ||
7e9c6eeb | 1622 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
8d870052 AD |
1623 | INIT_LIST_HEAD(&net->xt.tables[i]); |
1624 | return 0; | |
1625 | } | |
1626 | ||
/* Per-netns callbacks: only an .init is registered here. */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
2e4e6a17 HW |
1630 | |
1631 | static int __init xt_init(void) | |
1632 | { | |
942e4a2b SH |
1633 | unsigned int i; |
1634 | int rv; | |
1635 | ||
1636 | for_each_possible_cpu(i) { | |
7f5c6d4f | 1637 | seqcount_init(&per_cpu(xt_recseq, i)); |
942e4a2b | 1638 | } |
2e4e6a17 | 1639 | |
7e9c6eeb | 1640 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
2e4e6a17 HW |
1641 | if (!xt) |
1642 | return -ENOMEM; | |
1643 | ||
7e9c6eeb | 1644 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
9e19bb6d | 1645 | mutex_init(&xt[i].mutex); |
2722971c DM |
1646 | #ifdef CONFIG_COMPAT |
1647 | mutex_init(&xt[i].compat_mutex); | |
255d0dc3 | 1648 | xt[i].compat_tab = NULL; |
2722971c | 1649 | #endif |
2e4e6a17 HW |
1650 | INIT_LIST_HEAD(&xt[i].target); |
1651 | INIT_LIST_HEAD(&xt[i].match); | |
2e4e6a17 | 1652 | } |
8d870052 AD |
1653 | rv = register_pernet_subsys(&xt_net_ops); |
1654 | if (rv < 0) | |
1655 | kfree(xt); | |
1656 | return rv; | |
2e4e6a17 HW |
1657 | } |
1658 | ||
/* Module exit: reverse of xt_init() — drop the pernet ops, then the
 * per-family state array.
 */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}
1664 | ||
1665 | module_init(xt_init); | |
1666 | module_exit(xt_fini); | |
1667 |