Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-15 15:36:56 +07:00)
ac00881f92 ("bpf: convert hashtab lock to raw lock")

When running bpf samples on rt kernel, it reports the below warning:

BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917
in_atomic(): 1, irqs_disabled(): 128, pid: 477, name: ping
Preemption disabled at:
[<ffff80000017db58>] kprobe_perf_func+0x30/0x228

CPU: 3 PID: 477 Comm: ping Not tainted 4.1.10-rt8 #4
Hardware name: Freescale Layerscape 2085a RDB Board (DT)
Call trace:
[<ffff80000008a5b0>] dump_backtrace+0x0/0x128
[<ffff80000008a6f8>] show_stack+0x20/0x30
[<ffff8000007da90c>] dump_stack+0x7c/0xa0
[<ffff8000000e4830>] ___might_sleep+0x188/0x1a0
[<ffff8000007e2200>] rt_spin_lock+0x28/0x40
[<ffff80000018bf9c>] htab_map_update_elem+0x124/0x320
[<ffff80000018c718>] bpf_map_update_elem+0x40/0x58
[<ffff800000187658>] __bpf_prog_run+0xd48/0x1640
[<ffff80000017ca6c>] trace_call_bpf+0x8c/0x100
[<ffff80000017db58>] kprobe_perf_func+0x30/0x228
[<ffff80000017dd84>] kprobe_dispatcher+0x34/0x58
[<ffff8000007e399c>] kprobe_handler+0x114/0x250
[<ffff8000007e3bf4>] kprobe_breakpoint_handler+0x1c/0x30
[<ffff800000085b80>] brk_handler+0x88/0x98
[<ffff8000000822f0>] do_debug_exception+0x50/0xb8
Exception stack(0xffff808349687460 to 0xffff808349687580)
7460: 4ca2b600 ffff8083 4a3a7000 ffff8083 49687620 ffff8083 0069c5f8 ffff8000
7480: 00000001 00000000 007e0628 ffff8000 496874b0 ffff8083 007e1de8 ffff8000
74a0: 496874d0 ffff8083 0008e04c ffff8000 00000001 00000000 4ca2b600 ffff8083
74c0: 00ba2e80 ffff8000 49687528 ffff8083 49687510 ffff8083 000e5c70 ffff8000
74e0: 00c22348 ffff8000 00000000 ffff8083 49687510 ffff8083 000e5c74 ffff8000
7500: 4ca2b600 ffff8083 49401800 ffff8083 00000001 00000000 00000000 00000000
7520: 496874d0 ffff8083 00000000 00000000 00000000 00000000 00000000 00000000
7540: 2f2e2d2c 33323130 00000000 00000000 4c944500 ffff8083 00000000 00000000
7560: 00000000 00000000 008751e0 ffff8000 00000001 00000000 124e2d1d 00107b77

Convert hashtab lock to raw lock to avoid such warning.

Signed-off-by: Yang Shi <yang.shi@linaro.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
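The warning arises because, on a PREEMPT_RT kernel, a plain spinlock_t is backed by a sleeping rtmutex, while kprobe/perf handlers run with preemption disabled. A minimal sketch of the distinction the patch relies on (not part of the commit; demo_lock, demo_raw_lock and demo_from_atomic_context are hypothetical names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);          /* rtmutex on RT: may sleep */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);  /* real spinlock in all configs */

static void demo_from_atomic_context(void)
{
	unsigned long flags;

	/*
	 * On an RT kernel this would trigger the
	 * "BUG: sleeping function called from invalid context" splat
	 * if reached with preemption disabled (e.g. from a kprobe):
	 */
	spin_lock_irqsave(&demo_lock, flags);
	spin_unlock_irqrestore(&demo_lock, flags);

	/* Safe in atomic context on both RT and non-RT kernels: */
	raw_spin_lock_irqsave(&demo_raw_lock, flags);
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}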
372 lines
9.1 KiB
C
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>

struct bpf_htab {
	struct bpf_map map;
	struct hlist_head *buckets;
	raw_spinlock_t lock;
	u32 count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	struct hlist_node hash_node;
	struct rcu_head rcu;
	u32 hash;
	char key[0] __aligned(8);
};

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	struct bpf_htab *htab;
	int err, i;

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	err = -ENOMEM;
	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
		goto free_htab;

	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
				      GFP_USER | __GFP_NOWARN);

	if (!htab->buckets) {
		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct hlist_head));
		if (!htab->buckets)
			goto free_htab;
	}

	for (i = 0; i < htab->n_buckets; i++)
		INIT_HLIST_HEAD(&htab->buckets[i]);

	raw_spin_lock_init(&htab->lock);
	htab->count = 0;

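	/* element layout: struct htab_elem header, then the key rounded up
	 * to 8 bytes, then the value (matching the memcpy()s in
	 * htab_map_update_elem())
	 */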
	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8) +
			  htab->map.value_size;

	htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
				   htab->elem_size * htab->map.max_entries,
				   PAGE_SIZE) >> PAGE_SHIFT;
	return &htab->map;

free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

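/* n_buckets is a power of two, so masking with (n_buckets - 1) is a
 * cheap modulo that selects the bucket for a given hash
 */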
static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

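/* scan one bucket's hlist for an element with matching hash and key;
 * called under RCU (lookup) or with the htab lock held (update/delete)
 */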
static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* Called from syscall or from eBPF program */
static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_elem_raw(head, hash, key, key_size);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	u32 key_size;
	int ret;

	if (map_flags > BPF_EXIST)
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* allocate new element outside of lock */
	l_new = kmalloc(htab->elem_size, GFP_ATOMIC);
	if (!l_new)
		return -ENOMEM;

	key_size = map->key_size;

	memcpy(l_new->key, key, key_size);
	memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);

	l_new->hash = htab_map_hash(l_new->key, key_size);

	/* bpf_map_update_elem() can be called in_irq(), and on PREEMPT_RT
	 * the eBPF program context is atomic as well, so this must be a
	 * raw spinlock (a plain spinlock_t is a sleeping lock on RT)
	 */
	raw_spin_lock_irqsave(&htab->lock, flags);

	head = select_bucket(htab, l_new->hash);

	l_old = lookup_elem_raw(head, l_new->hash, key, key_size);

	if (!l_old && unlikely(htab->count >= map->max_entries)) {
		/* if elem with this 'key' doesn't exist and we've reached
		 * max_entries limit, fail insertion of new elem
		 */
		ret = -E2BIG;
		goto err;
	}

	if (l_old && map_flags == BPF_NOEXIST) {
		/* elem already exists */
		ret = -EEXIST;
		goto err;
	}

	if (!l_old && map_flags == BPF_EXIST) {
		/* elem doesn't exist, cannot update it */
		ret = -ENOENT;
		goto err;
	}

	/* add new element to the head of the list, so that concurrent
	 * search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_del_rcu(&l_old->hash_node);
		kfree_rcu(l_old, rcu);
	} else {
		htab->count++;
	}
	raw_spin_unlock_irqrestore(&htab->lock, flags);

	return 0;
err:
	raw_spin_unlock_irqrestore(&htab->lock, flags);
	kfree(l_new);
	return ret;
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	raw_spin_lock_irqsave(&htab->lock, flags);

	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		htab->count--;
		kfree_rcu(l, rcu);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&htab->lock, flags);
	return ret;
}

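/* called only from htab_map_free(), after synchronize_rcu(), so plain
 * kfree() without an RCU grace period is safe here
 */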
static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_head *head = select_bucket(htab, i);
		struct hlist_node *n;
		struct htab_elem *l;

		hlist_for_each_entry_safe(l, n, head, hash_node) {
			hlist_del_rcu(&l->hash_node);
			htab->count--;
			kfree(l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of kfree_rcu() callbacks for elements of this map may not have
	 * executed. It's ok. Proceed to free residual elements and map itself
	 */
	delete_all_elements(htab);
	kvfree(htab->buckets);
	kfree(htab);
}

static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	return 0;
}
late_initcall(register_htab_map);
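For reference, a hypothetical userspace sketch (not part of the kernel file above) that exercises this map type end to end through the bpf(2) syscall. The sys_bpf() wrapper and all names here are illustrative assumptions, not an established API; error handling is minimal.

#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	union bpf_attr attr;
	uint32_t key = 1, value = 42, out = 0;
	int map_fd;

	/* htab_map_alloc(): key/value sizes and max_entries are mandatory */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 16;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr);
	if (map_fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* htab_map_update_elem() with BPF_NOEXIST: fails if the key exists */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	attr.value = (uint64_t)(unsigned long)&value;
	attr.flags = BPF_NOEXIST;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr))
		perror("BPF_MAP_UPDATE_ELEM");

	/* htab_map_lookup_elem(): copies the stored value back out */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	attr.value = (uint64_t)(unsigned long)&out;
	if (!sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr))
		printf("key %u -> value %u\n", key, out);

	/* htab_map_delete_elem() */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	if (sys_bpf(BPF_MAP_DELETE_ELEM, &attr))
		perror("BPF_MAP_DELETE_ELEM");

	close(map_fd);
	return 0;
}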