net/core/flow.c
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

struct flow_cache_entry {
	union {
		struct hlist_node hlist;
		struct list_head gc_list;
	} u;
	struct net *net;
	u16 family;
	u8 dir;
	u32 genid;
	struct flowi key;
	struct flow_cache_object *object;
};

struct flow_cache_percpu {
	struct hlist_head *hash_table;
	int hash_count;
	u32 hash_rnd;
	int hash_rnd_recalc;
	struct tasklet_struct flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache *cache;
	atomic_t cpuleft;
	struct completion completion;
};

struct flow_cache {
	u32 hash_shift;
	struct flow_cache_percpu __percpu *percpu;
	struct notifier_block hotcpu_notifier;
	int low_watermark;
	int high_watermark;
	struct timer_list rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

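/* Timer callback: periodically flag every possible CPU's hash seed for
 * regeneration.  The new random value is picked up lazily by the next
 * lookup on that CPU (see flow_new_hash_rnd()).
 */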
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

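/* Entries evicted from the per-CPU hash tables are queued on
 * flow_cache_gc_list and freed later from this work handler.
 */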
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&flow_cache_gc_lock);
	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&flow_cache_gc_lock);
		list_splice_tail(gc_list, &flow_cache_gc_list);
		spin_unlock_bh(&flow_cache_gc_lock);
		schedule_work(&flow_cache_gc_work);
	}
}

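/* Walk every bucket of one per-CPU cache, keeping at most shrink_to
 * still-valid entries per bucket and handing everything else to the
 * garbage collector.  shrink_to == 0 therefore empties the cache.
 */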
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp. But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

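/* Look up the flow cache object for (net, family, dir, key) in this CPU's
 * hash table, with bottom halves disabled.  On a miss, or when the cached
 * entry is stale (generation mismatch), the resolver is consulted and, if
 * it succeeds, its result is cached for subsequent lookups.
 */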
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct hlist_node *entry;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init? Making flow_cache_init a
	 * pre-smp initcall would solve this. --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (flo && !IS_ERR(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();
	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

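/* Flush stale entries on every online CPU: each CPU runs the flush
 * tasklet against its own hash table and the caller sleeps on a
 * completion until all of them have finished.  Serialized by
 * flow_flush_sem and protected against CPU hotplug.
 */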
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

static void flow_cache_flush_task(struct work_struct *work)
{
	flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
	schedule_work(&flow_cache_flush_work);
}

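/* Allocate and initialize one CPU's hash table (on that CPU's NUMA node)
 * the first time the CPU is brought up; also used at init time for the
 * CPUs that are already online.
 */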
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

static int __init flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);