Merge tag 'drm-intel-fixes-2013-11-20' of git://people.freedesktop.org/~danvet/drm...
[deliverable/linux.git] / net / netfilter / nf_conntrack_expect.c
CommitLineData
77ab9cff
MJ
1/* Expectation handling for nf_conntrack. */
2
3/* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
f229f6ce 6 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
77ab9cff
MJ
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/types.h>
14#include <linux/netfilter.h>
15#include <linux/skbuff.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <linux/stddef.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/percpu.h>
22#include <linux/kernel.h>
a71c0855 23#include <linux/jhash.h>
d9b93842 24#include <linux/moduleparam.h>
bc3b2d7f 25#include <linux/export.h>
457c4cbc 26#include <net/net_namespace.h>
77ab9cff
MJ
27
28#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_expect.h>
31#include <net/netfilter/nf_conntrack_helper.h>
32#include <net/netfilter/nf_conntrack_tuple.h>
5d0aa2cc 33#include <net/netfilter/nf_conntrack_zones.h>
77ab9cff 34
a71c0855
PM
/* Number of buckets in the expectation hash table; set at module init
 * (see nf_conntrack_expect_init()) and tunable via the read-only
 * expect_hashsize module parameter. */
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

/* Global cap on the number of expectations (per netns table). */
unsigned int nf_ct_expect_max __read_mostly;

/* Slab cache backing struct nf_conntrack_expect allocations. */
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
77ab9cff
MJ
41
/* nf_conntrack_expect helper functions */

/* Unlink @exp from the global expectation hash and from its master's
 * per-conntrack list, emit an IPEXP_DESTROY event and drop the table's
 * reference.  Caller must hold nf_conntrack_lock and must already have
 * stopped the timeout timer (asserted below). */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	/* RCU removal: lockless readers may still see exp until a grace
	 * period elapses; the final free is deferred via call_rcu(). */
	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
77ab9cff 64
/* Timer callback: the expectation expired without being matched.
 * Unlink it under nf_conntrack_lock, then drop the timer's reference. */
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}
74
a71c0855
PM
75static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
76{
34498825
PM
77 unsigned int hash;
78
f682cefa
CG
79 if (unlikely(!nf_conntrack_hash_rnd)) {
80 init_nf_conntrack_hash_rnd();
a71c0855
PM
81 }
82
34498825 83 hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
a71c0855 84 (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
f682cefa 85 (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
34498825 86 return ((u64)hash * nf_ct_expect_hsize) >> 32;
a71c0855
PM
87}
88
/* Lockless lookup of an expectation matching @tuple in @zone.
 * Caller must hold rcu_read_lock(); no reference is taken on the
 * returned entry (see nf_ct_expect_find_get() for a referenced one). */
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	/* Fast path: most namespaces carry no expectations at all. */
	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
77ab9cff
MJ
108
/* Just find an expectation corresponding to a tuple, taking a
 * reference on it.  Returns NULL if nothing matches or if the entry's
 * refcount already dropped to zero (it is being freed). */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	/* The entry may be concurrently released under RCU; only hand it
	 * out if we win the race and grab a reference. */
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
77ab9cff
MJ
125
/* If an expectation for this connection is found, it gets deleted from
 * the global list and then returned (with a reference for the caller). */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		/* Permanent expectations stay linked; give the caller an
		 * extra reference instead of unlinking. */
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		/* We beat the timeout handler to the timer: unlink and
		 * transfer the timer's reference to the caller. */
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Timer already fired; teardown is in progress elsewhere. */
	return NULL;
}
168
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		/* Only unlink if we stop the timer first; otherwise the
		 * timeout handler owns the teardown and will do it. */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
77ab9cff
MJ
188
189/* Would two expected things clash? */
190static inline int expect_clash(const struct nf_conntrack_expect *a,
191 const struct nf_conntrack_expect *b)
192{
193 /* Part covered by intersection of masks must be unequal,
194 otherwise they clash */
d4156e8c 195 struct nf_conntrack_tuple_mask intersect_mask;
77ab9cff
MJ
196 int count;
197
77ab9cff 198 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
77ab9cff
MJ
199
200 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
201 intersect_mask.src.u3.all[count] =
202 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
203 }
204
77ab9cff
MJ
205 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
206}
207
208static inline int expect_matches(const struct nf_conntrack_expect *a,
209 const struct nf_conntrack_expect *b)
210{
f64f9e71
JP
211 return a->master == b->master && a->class == b->class &&
212 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
5d0aa2cc
PM
213 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
214 nf_ct_zone(a->master) == nf_ct_zone(b->master);
77ab9cff
MJ
215}
216
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	/* If del_timer() fails the timeout handler is already tearing
	 * the expectation down; leave it alone in that case. */
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
77ab9cff
MJ
228
229/* We don't increase the master conntrack refcount for non-fulfilled
230 * conntracks. During the conntrack destruction, the expectations are
231 * always killed before the conntrack itself */
6823645d 232struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
77ab9cff
MJ
233{
234 struct nf_conntrack_expect *new;
235
6823645d 236 new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
77ab9cff
MJ
237 if (!new)
238 return NULL;
239
240 new->master = me;
241 atomic_set(&new->use, 1);
242 return new;
243}
6823645d 244EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
77ab9cff 245
/* Fill in an allocated expectation's tuple and mask.
 *
 * @class:  index into the helper's expect_policy array
 * @family: AF_INET or AF_INET6; selects the address length (4 vs 16)
 * @saddr:  expected source address, or NULL to wildcard the source
 * @daddr:  expected destination address (must not be NULL)
 * @proto:  L4 protocol number
 * @src:    expected source port, or NULL to wildcard it
 * @dst:    expected destination port (must not be NULL)
 */
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		/* NULL source address: match anything (zero mask). */
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		/* NULL source port: match any port. */
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
d6a9b650 303
7d0742da
PM
304static void nf_ct_expect_free_rcu(struct rcu_head *head)
305{
306 struct nf_conntrack_expect *exp;
307
308 exp = container_of(head, struct nf_conntrack_expect, rcu);
309 kmem_cache_free(nf_ct_expect_cachep, exp);
310}
311
/* Drop a reference on @exp; the final put schedules the free through
 * RCU so concurrent lockless lookups stay safe. */
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
77ab9cff 318
/* Link @exp into the global hash and its master's list and arm the
 * timeout timer.  Caller holds nf_conntrack_lock.  Always returns 0. */
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references : one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		/* Per-class timeout from the helper's expect policy. */
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	/* NOTE(review): when helper is NULL, exp->timeout.expires is not
	 * set here — presumably the caller has initialized it (e.g. for
	 * userspace expectations); verify against callers. */
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}
348
349/* Race with expectations being used means we could have none to find; OK. */
6002f266
PM
350static void evict_oldest_expect(struct nf_conn *master,
351 struct nf_conntrack_expect *new)
77ab9cff 352{
b560580a 353 struct nf_conn_help *master_help = nfct_help(master);
6002f266 354 struct nf_conntrack_expect *exp, *last = NULL;
77ab9cff 355
b67bfe0d 356 hlist_for_each_entry(exp, &master_help->expectations, lnode) {
6002f266
PM
357 if (exp->class == new->class)
358 last = exp;
359 }
b560580a 360
6002f266
PM
361 if (last && del_timer(&last->timeout)) {
362 nf_ct_unlink_expect(last);
363 nf_ct_expect_put(last);
77ab9cff
MJ
364 }
365}
366
/* Validate @expect before insertion, under nf_conntrack_lock:
 * cancel an identical pending expectation (it is being refreshed),
 * reject clashing ones, and enforce per-helper and global limits.
 *
 * Returns 1 when insertion may proceed, or a negative errno:
 *  -ESHUTDOWN  master conntrack has no helper extension
 *  -EBUSY      an existing expectation clashes with this one
 *  -EMFILE     per-class or global expectation limit reached
 */
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Identical expectation pending: drop the old one
			 * if we beat its timer; at most one can match. */
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			/* Try to make room by evicting the oldest entry
			 * of this class before giving up. */
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}
419
/* Validate and insert a new expectation, then report it via an
 * IPEXP_NEW ctnetlink event (outside the lock).
 * Returns 0 on success or a negative errno from the checks. */
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
441
54b07dca 442#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Per-open seq_file iterator state: netns handle plus the hash bucket
 * the iteration is currently positioned in. */
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};
447
/* Return the first expectation in the hash, scanning buckets upward
 * from 0.  Caller holds rcu_read_lock() (taken in exp_seq_start()). */
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}
77ab9cff 461
5d08ad44
PM
/* Advance past @head to the next expectation, moving on to later
 * buckets when the current chain is exhausted. */
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}
476
5d08ad44 477static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
77ab9cff 478{
5d08ad44 479 struct hlist_node *head = ct_expect_get_first(seq);
77ab9cff 480
5d08ad44
PM
481 if (head)
482 while (pos && (head = ct_expect_get_next(seq, head)))
483 pos--;
484 return pos ? NULL : head;
485}
77ab9cff 486
5d08ad44 487static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
7d0742da 488 __acquires(RCU)
5d08ad44 489{
7d0742da 490 rcu_read_lock();
5d08ad44
PM
491 return ct_expect_get_idx(seq, *pos);
492}
77ab9cff 493
5d08ad44
PM
/* seq_file next: advance the position counter and step the iterator. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}
499
/* seq_file stop: leave the RCU read section opened in exp_seq_start(). */
static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
505
/* Emit one /proc/net/nf_conntrack_expect line: remaining timeout,
 * protocol numbers, the expected tuple, flags, and the helper name
 * (with its per-class policy name when present). */
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	/* Seconds left on the timer, 0 if expired, "-" if timerless. */
	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
				         expect->tuple.dst.protonum));

	/* Flags are comma-separated; delim becomes "," once one is out. */
	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}
549
/* seq_file iteration callbacks for /proc/net/nf_conntrack_expect. */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
556
/* open() handler: attach per-netns iterator state to the seq_file. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}
562
/* file_operations backing /proc/net/nf_conntrack_expect. */
static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
54b07dca 570#endif /* CONFIG_NF_CONNTRACK_PROCFS */
e9c1b084 571
/* Create /proc/net/nf_conntrack_expect for @net; no-op (returning 0)
 * when procfs support is compiled out. */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}
584
/* Remove the proc entry created by exp_proc_init(); no-op when procfs
 * support is compiled out. */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
591
13ccdfc2 592module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
a71c0855 593
/* Per-netns setup: allocate the expectation hash table and register
 * the proc entry.  Returns 0 or a negative errno; on failure all
 * partially-acquired resources are released (goto-cleanup). */
int nf_conntrack_expect_pernet_init(struct net *net)
{
	int err = -ENOMEM;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	err = exp_proc_init(net);
	if (err < 0)
		goto err2;

	return 0;
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}
613
/* Per-netns teardown: drop the proc entry and free the hash table. */
void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
83b4dbe1
G
619
/* Module init: size the expectation hash (default: conntrack table
 * size / 256, minimum 1 bucket unless overridden by the module
 * parameter), derive the global limit, and create the slab cache. */
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}
635
/* Module exit: flush pending RCU frees, then destroy the slab cache. */
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}
This page took 0.667772 seconds and 5 git commands to generate.