net/openvswitch/flow_table.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

19#include "flow.h"
20#include "datapath.h"
34ae932a 21#include "flow_netlink.h"
e6445719
PS
22#include <linux/uaccess.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/if_ether.h>
26#include <linux/if_vlan.h>
27#include <net/llc_pdu.h>
28#include <linux/kernel.h>
87545899 29#include <linux/jhash.h>
e6445719
PS
30#include <linux/jiffies.h>
31#include <linux/llc.h>
32#include <linux/module.h>
33#include <linux/in.h>
34#include <linux/rcupdate.h>
35#include <linux/if_arp.h>
36#include <linux/ip.h>
37#include <linux/ipv6.h>
38#include <linux/sctp.h>
39#include <linux/tcp.h>
40#include <linux/udp.h>
41#include <linux/icmp.h>
42#include <linux/icmpv6.h>
43#include <linux/rculist.h>
44#include <net/ip.h>
45#include <net/ipv6.h>
46#include <net/ndisc.h>
47
b637e498
PS
48#define TBL_MIN_BUCKETS 1024
49#define REHASH_INTERVAL (10 * 60 * HZ)
50
e6445719 51static struct kmem_cache *flow_cache;
63e7959c 52struct kmem_cache *flow_stats_cache __read_mostly;
e6445719
PS
53
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

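/* Copy 'src' into 'dst', ANDing each long-sized word with the corresponding
 * word of 'mask->key'.  Only the bytes covered by 'mask->range' are written
 * unless 'full' is true.
 */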
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask)
{
        int start = full ? 0 : mask->range.start;
        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
        const long *m = (const long *)((const u8 *)&mask->key + start);
        const long *s = (const long *)((const u8 *)src + start);
        long *d = (long *)((u8 *)dst + start);
        int i;

        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
         * if 'full' is false the memory outside of the 'mask->range' is left
         * uninitialized. This can be used as an optimization when further
         * operations on 'dst' only use contents within 'mask->range'.
         */
        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
}

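/* Allocate a new flow from the flow cache together with one zeroed statistics
 * node for the first NUMA node; returns an ERR_PTR on allocation failure.
 */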
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct flow_stats *stats;
        int node;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->sf_acts = NULL;
        flow->mask = NULL;
        flow->id.unmasked_key = NULL;
        flow->id.ufid_len = 0;
        flow->stats_last_writer = NUMA_NO_NODE;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO,
                                      node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        for_each_node(node)
                if (node != 0)
                        RCU_INIT_POINTER(flow->stats[node], NULL);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
        return table->count;
}

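/* Allocate a flex_array of 'n_buckets' hash list heads and initialize each
 * bucket to an empty list.
 */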
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

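/* Free a flow and everything attached to it: the unmasked key (for flows
 * indexed by key), the action list and the per-node statistics.
 */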
static void flow_free(struct sw_flow *flow)
{
        int node;

        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        if (flow->sf_acts)
                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
                        kmem_cache_free(flow_stats_cache,
                                        (struct flow_stats __force *)flow->stats[node]);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

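/* Free 'flow' immediately, or after an RCU grace period when 'deferred' is
 * true.
 */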
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}

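/* Allocate a table instance with 'new_size' buckets and a fresh random hash
 * seed.
 */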
static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);

        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

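/* Initialize a flow table: one table instance indexed by masked flow key and
 * one indexed by unique flow identifier (UFID).
 */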
int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti, *ufid_ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);

        if (!ti)
                return -ENOMEM;

        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ufid_ti)
                goto free_ti;

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->ufid_ti, ufid_ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        table->ufid_count = 0;
        return 0;

free_ti:
        __table_instance_destroy(ti);
        return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}

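/* Destroy the key and UFID table instances.  Unless 'keep_flows' is set on
 * 'ti', every flow still linked into the buckets is freed as well; with
 * 'deferred' the frees happen after an RCU grace period.
 */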
static void table_instance_destroy(struct table_instance *ti,
                                   struct table_instance *ufid_ti,
                                   bool deferred)
{
        int i;

        if (!ti)
                return;

        BUG_ON(!ufid_ti);
        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;
                int ufid_ver = ufid_ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
                        hlist_del_rcu(&flow->flow_table.node[ver]);
                        if (ovs_identifier_is_ufid(&flow->id))
                                hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred) {
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
        } else {
                __table_instance_destroy(ti);
                __table_instance_destroy(ufid_ti);
        }
}

/* No need for locking: this function is called from the RCU callback or
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        struct table_instance *ti = rcu_dereference_raw(table->ti);
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

        table_instance_destroy(ti, ufid_ti, false);
}

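/* Iterate over the flows in 'ti', resuming from the position described by
 * '*bucket' and '*last' and updating both so a dump can be continued later.
 */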
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                              (hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
                                  struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->flow_table.hash);
        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
                                       struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->ufid_table.hash);
        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

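/* Relink every flow from 'old' into 'new'.  The node version is flipped so
 * the flows' other set of hlist nodes is used, which lets 'old' be destroyed
 * later without freeing the flows ('keep_flows' is set on it).
 */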
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new, bool ufid)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                if (ufid)
                        hlist_for_each_entry(flow, head,
                                             ufid_table.node[old_ver])
                                ufid_table_instance_insert(new, flow);
                else
                        hlist_for_each_entry(flow, head,
                                             flow_table.node[old_ver])
                                table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets, bool ufid)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti, ufid);

        return new_ti;
}

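/* Replace both table instances with fresh, minimum-sized ones; the old
 * instances and all the flows they contain are freed after an RCU grace
 * period.
 */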
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti, *new_ti;
        struct table_instance *old_ufid_ti, *new_ufid_ti;

        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;
        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ufid_ti)
                goto err_free_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;
        flow_table->ufid_count = 0;

        table_instance_destroy(old_ti, old_ufid_ti, true);
        return 0;

err_free_ti:
        __table_instance_destroy(new_ti);
        return -ENOMEM;
}

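/* Hash the bytes of 'key' that fall inside 'range' using jhash2(). */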
static u32 flow_hash(const struct sw_flow_key *key,
                     const struct sw_flow_key_range *range)
{
        int key_start = range->start;
        int key_end = range->end;
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_proto)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

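/* Compare the bytes in [key_start, key_end) of two flow keys one long at a
 * time; returns true when the ranges are identical.
 */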
static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                const struct sw_flow_key_range *range)
{
        return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                                      const struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        BUG_ON(ovs_identifier_is_ufid(&flow->id));
        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

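/* Look up a flow in one table instance: apply 'mask' to 'unmasked', hash the
 * result over the mask's range and scan the matching bucket for a flow that
 * uses the same mask and masked key.
 */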
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          const struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
        }
        return NULL;
}

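/* Look up 'key' by trying each mask in the mask list in turn; '*n_mask_hit'
 * is set to the number of masks that had to be tried.
 */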
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow) /* Found */
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

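/* Look up the flow whose unmasked key matches 'match' exactly, rather than
 * returning the first masked match.  Only called under ovs_mutex.
 */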
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        /* Always called under ovs-mutex. */
        list_for_each_entry(mask, &tbl->mask_list, list) {
                flow = masked_flow_lookup(ti, match->key, mask);
                if (flow && ovs_identifier_is_key(&flow->id) &&
                    ovs_flow_cmp_unmasked_key(flow, match))
                        return flow;
        }
        return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
        return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
                              const struct sw_flow_id *sfid)
{
        if (flow->id.ufid_len != sfid->ufid_len)
                return false;

        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);

        return ovs_flow_cmp_unmasked_key(flow, match);
}

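/* Look up a flow by its unique flow identifier (UFID) in the UFID table
 * instance.
 */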
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
                                         const struct sw_flow_id *ufid)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;

        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
        }
        return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
                                                    bool ufid)
{
        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        if (mask) {
                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        kfree_rcu(mask, rcu);
                }
        }
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
        table->count--;
        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
                table->ufid_count--;
        }

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held.
         */
        flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;

                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            const struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;

        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets, false);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti;

        flow->ufid_table.hash = ufid_hash(&flow->id);
        ti = ovsl_dereference(table->ufid_ti);
        ufid_table_instance_insert(ti, flow);
        table->ufid_count++;

        /* Expand table, if necessary, to make room. */
        if (table->ufid_count > ti->n_buckets) {
                struct table_instance *new_ti;

                new_ti = table_instance_expand(ti, true);
                if (new_ti) {
                        rcu_assign_pointer(table->ufid_ti, new_ti);
                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                }
        }
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        const struct sw_flow_mask *mask)
{
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;
        flow_key_insert(table, flow);
        if (ovs_identifier_is_ufid(&flow->id))
                flow_ufid_insert(table, flow);

        return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (nr_node_ids
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}