/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include "percpu_freelist.h"

struct bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	struct pcpu_freelist freelist;
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_node hash_node;
		struct bpf_htab *htab;
		struct pcpu_freelist_node fnode;
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
	};
	u32 hash;
	char key[0] __aligned(8);
};
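
/*
 * Element layout sketch: the key starts right after struct htab_elem;
 * what follows the 8-byte-aligned key depends on the map type:
 *
 *   BPF_MAP_TYPE_HASH:        [ htab_elem | key (8-aligned) | value     ]
 *   BPF_MAP_TYPE_PERCPU_HASH: [ htab_elem | key (8-aligned) | __percpu* ]
 *
 * htab_elem_set_ptr()/htab_elem_get_ptr() below encode the per-cpu
 * variant by storing the per-cpu pointer in the slot after the key.
 */
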
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	vfree(htab->elems);
}

static int prealloc_elems_and_freelist(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	err = pcpu_freelist_init(&htab->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&htab->freelist, htab->elems, htab->elem_size,
			       htab->map.max_entries);
	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}
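
/*
 * Preallocation in a nutshell: unless BPF_F_NO_PREALLOC was requested,
 * htab_map_alloc() calls this to carve all max_entries elements out of
 * a single vzalloc'ed array and seed a per-cpu freelist with them, so
 * later updates in atomic context never need to allocate memory.
 */
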
static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}
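
/*
 * Why extra_elems: when a full preallocated table is asked to update
 * an existing key, the operation must not fail just because no free
 * element is left. One spare element per CPU lets alloc_htab_elem()
 * stage the new value while the old element is replaced under the
 * bucket lock; the spare is handed back in free_htab_elem().
 */
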
/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	if (attr->map_flags & ~BPF_F_NO_PREALLOC)
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
				      GFP_USER | __GFP_NOWARN);

	if (!htab->buckets) {
		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
		if (!htab->buckets)
			goto free_htab;
	}

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu) {
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
		err = prealloc_elems_and_freelist(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	kvfree(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}
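
/*
 * Rough userspace sketch of creating such a map via the bpf(2)
 * syscall (illustration only; libbpf wrappers achieve the same):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *		.map_flags   = 0,	// or BPF_F_NO_PREALLOC
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * With map_flags == 0 the freelist above is pre-populated; with
 * BPF_F_NO_PREALLOC elements are kmalloc'ed on demand instead.
 */
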
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}
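
/*
 * n_buckets is rounded up to a power of two in htab_map_alloc(), so
 * "hash & (n_buckets - 1)" is a cheap equivalent of "hash % n_buckets";
 * e.g. with n_buckets == 1024 the low 10 bits of the jhash value pick
 * the bucket.
 */
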
static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}
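
/*
 * The returned pointer is the in-place value, located at
 * key + round_up(key_size, 8) per the element layout above. Callers
 * run under rcu_read_lock(), which keeps the element alive while the
 * pointer is in use.
 */
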
/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_elem_raw(head, hash, key, key_size);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}
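
/*
 * Rough userspace iteration sketch (illustration only; KEY_SIZE is a
 * placeholder for the map's real key size):
 *
 *	char key[KEY_SIZE], next_key[KEY_SIZE];
 *	// prime 'key' with a non-existent key to start from bucket 0
 *	while (bpf_map_get_next_key(fd, key, next_key) == 0) {
 *		// ... look up and process next_key ...
 *		memcpy(key, next_key, KEY_SIZE);
 *	}
 *
 * Concurrent updates may cause the walk to miss or repeat keys; the
 * kernel side only guarantees forward progress through the buckets.
 */
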
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
		if (!l_new)
			err = -E2BIG;
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		if (!onallcpus) {
			/* copy true value_size bytes */
			memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
		} else {
			int off = 0, cpu;

			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
						value + off, size);
				off += size;
			}
		}
		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
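
/*
 * Flag semantics enforced here: BPF_ANY always writes, BPF_NOEXIST
 * only creates (-EEXIST if the key is already present) and BPF_EXIST
 * only replaces (-ENOENT if the key is absent).
 */
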
/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}
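
/*
 * Ordering note: the new element is linked in before the old one is
 * unlinked, so a concurrent RCU reader always finds either the old or
 * the new value for the key, never neither. A kmalloc'ed old element
 * is then freed via call_rcu() only after a grace period; preallocated
 * elements go straight back to the freelist.
 */
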
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		void __percpu *pptr = htab_elem_get_ptr(l_old, key_size);
		u32 size = htab->map.value_size;

		/* per-cpu hash map can update value in-place */
		if (!onallcpus) {
			memcpy(this_cpu_ptr(pptr), value, size);
		} else {
			int off = 0, cpu;

			size = round_up(size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
						value + off, size);
				off += size;
			}
		}
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_head *head = select_bucket(htab, i);
		struct hlist_node *n;
		struct htab_elem *l;

		hlist_for_each_entry_safe(l, n, head, hash_node) {
			hlist_del_rcu(&l->hash_node);
			htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (htab->map.map_flags & BPF_F_NO_PREALLOC) {
		delete_all_elements(htab);
	} else {
		htab_free_elems(htab);
		pcpu_freelist_destroy(&htab->freelist);
	}
	free_percpu(htab->extra_elems);
	kvfree(htab->buckets);
	kfree(htab);
}

static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
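
/*
 * Userspace view: a syscall lookup on a per-cpu hash map yields a flat
 * array of num_possible_cpus() values, each padded to 8 bytes, so the
 * destination buffer must hold at least
 * round_up(value_size, 8) * num_possible_cpus() bytes.
 */
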
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	int ret;

	rcu_read_lock();
	ret = __htab_percpu_map_update_elem(map, key, value, map_flags, true);
	rcu_read_unlock();

	return ret;
}

static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __read_mostly = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	return 0;
}
late_initcall(register_htab_map);