/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

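/*
 * Expectations carrying the NF_CT_EXPECT_USERSPACE flag (created via
 * ctnetlink rather than by a kernel helper) are kept on this extra
 * list so they can all be flushed on shutdown; see
 * nf_ct_remove_userspace_expectations() below.
 */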
static HLIST_HEAD(nf_ct_userspace_expect_list);

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 pid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
                master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_conntrack_hash_rnd))
                init_nf_conntrack_hash_rnd();

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
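/*
 * Note: the final multiply-shift maps the 32-bit jhash value onto
 * [0, nf_ct_expect_hsize) without a division, since
 * (hash * hsize) >> 32 is always below hsize.  For example, with
 * nf_ct_expect_hsize == 256 a hash of 0x80000000 selects bucket
 * (2^31 * 2^8) >> 32 == 128.
 */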

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
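/*
 * The atomic_inc_not_zero() above is the usual RCU lookup idiom: an
 * entry found under rcu_read_lock() may already have dropped its last
 * reference, in which case it is merely waiting out the grace period
 * before being freed and must not be handed out again.
 */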

/* If an expectation for this connection is found, it is deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If master is not in hash table yet (ie. packet hasn't left
           this machine yet), how can other end know about expected?
           Hence these are not the droids you are looking for (if
           master ct never got confirmed, we'd hold a reference to it
           and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
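/*
 * In other words, two expectations clash when their tuples agree on
 * every field that both masks still consider significant: e.g. an
 * expectation with a wildcarded source port clashes with a fully
 * specified one for the same addresses, because the intersected mask
 * ignores the only field in which they differ.
 */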

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
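/*
 * A minimal usage sketch, modelled on in-tree helpers such as the FTP
 * helper (error handling trimmed; "port" stands for the data-channel
 * port parsed from the payload): allocate, describe the expected flow,
 * register it, then drop the caller's reference:
 *
 *      exp = nf_ct_expect_alloc(ct);
 *      if (exp == NULL)
 *              return NF_DROP;
 *      nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *                        &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *                        &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *                        IPPROTO_TCP, NULL, &port);
 *      if (nf_ct_expect_related(exp) != 0)
 *              ret = NF_DROP;
 *      nf_ct_expect_put(exp);
 */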

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
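/*
 * The free must go through call_rcu() because __nf_ct_expect_find()
 * walks the hash under rcu_read_lock() only; the object has to stay
 * valid until every such reader has finished.
 */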

static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        /* two references : one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
                master_help->expecting[exp->class]++;
        } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
                hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
                p = &rcu_dereference_protected(
                        master_help->helper,
                        lockdep_is_held(&nf_conntrack_lock)
                        )->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

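/*
 * Re-arm the timer of an existing expectation.  If del_timer() fails,
 * the timeout handler is already running (or has run) and the
 * expectation is being torn down, so report failure rather than
 * resurrecting it.
 */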
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);
        const struct nf_conntrack_expect_policy *p;

        if (!del_timer(&i->timeout))
                return 0;

        p = &rcu_dereference_protected(
                master_help->helper,
                lockdep_is_held(&nf_conntrack_lock)
                )->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
}

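/*
 * Returns > 0 if the expectation may be inserted, 0 if an identical
 * expectation already existed and only had its timer refreshed, and a
 * negative errno on failure.  Caller holds nf_conntrack_lock.
 */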
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n;
        unsigned int h;
        int ret = 1;

        /* Don't allow expectations created from kernel-space with no helper */
        if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
            (!master_help || !master_help->helper)) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore.. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        if (master_help) {
                p = &rcu_dereference_protected(
                        master_help->helper,
                        lockdep_is_held(&nf_conntrack_lock)
                        )->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                            >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = 0;
        nf_ct_expect_insert(expect);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
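/*
 * Note that a 0 return from __nf_ct_expect_check() (identical
 * expectation refreshed) also takes the "out" path: the expectation
 * is not inserted a second time and no IPEXP_NEW event is generated,
 * but 0 is still returned to the caller as success.
 */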

void nf_ct_remove_userspace_expectations(void)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        hlist_for_each_entry_safe(exp, n, next,
                                  &nf_ct_userspace_expect_list, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

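/*
 * The seq_file callbacks below follow the standard RCU-protected
 * iteration pattern: start() takes rcu_read_lock(), next() advances
 * one entry, and stop() drops the lock again, so readers of
 * /proc/net/nf_conntrack_expect never block writers.
 */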
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner = THIS_MODULE,
        .open = exp_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

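/*
 * Hash sizing: unless overridden via the expect_hashsize module
 * parameter (read-only at runtime, hence mode 0400), the expectation
 * hash is sized at 1/256th of the main conntrack hash table, and
 * nf_ct_expect_max caps the table at four entries per bucket on
 * average.
 */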
int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net)) {
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
        nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}