This patch adds a map-in-map LRU example. If we know that only a subset of cores will use the LRU, we can allocate a common LRU list per target core and store it in an array of LRU hash maps. This gives map-update performance comparable to a BPF_F_NO_COMMON_LRU map without wasting memory on cores that we know will never access the LRU map.

BPF_F_NO_COMMON_LRU:
> map_perf_test 32 8 10000000 10000000 | awk '{sum += $3}END{print sum}'
9234314 (9.23M/s)

map-in-map LRU:
> map_perf_test 512 8 1260000 80000000 | awk '{sum += $3}END{print sum}'
9962743 (9.96M/s)

Note that max_entries for the map-in-map LRU test is 1260000, which is the max_entries of each inner LRU map. 8 processes are started, so 8 * 1260000 = 10080000 (~10M), close to what is used in the BPF_F_NO_COMMON_LRU test.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
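
For context, the kernel program below only consumes array_of_lru_hashs; creating the inner LRU maps and installing one per CPU happens from user space (the sample's map_perf_test_user.c counterpart). The following is a minimal, hypothetical sketch of that setup using the raw bpf(2) syscall rather than the sample's actual loader code: setup_lru_array() and create_map() are made-up helpers, it assumes the target CPUs are numbered 0..N-1, and error handling is omitted.

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

static int create_map(enum bpf_map_type type, int key_size, int value_size,
		      int max_entries, int inner_map_fd)
{
	union bpf_attr attr = {};

	attr.map_type = type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.inner_map_fd = inner_map_fd;	/* only meaningful for map-in-map */
	return sys_bpf(BPF_MAP_CREATE, &attr);
}

int setup_lru_array(int nr_target_cpus)
{
	int template_fd, outer_fd, cpu;

	/* Template inner map: fixes the key/value layout of all inner maps. */
	template_fd = create_map(BPF_MAP_TYPE_LRU_HASH, sizeof(uint32_t),
				 sizeof(long), 1260000, 0);

	/* Outer array-of-maps: each element is updated with an inner-map fd. */
	outer_fd = create_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, sizeof(uint32_t),
			      sizeof(uint32_t), 1024, template_fd);

	/* Give each target core its own common LRU list. */
	for (cpu = 0; cpu < nr_target_cpus; cpu++) {
		union bpf_attr attr = {};
		int fd = create_map(BPF_MAP_TYPE_LRU_HASH, sizeof(uint32_t),
				    sizeof(long), 1260000, 0);

		attr.map_fd = outer_fd;
		attr.key = (uint64_t)(unsigned long)&cpu;
		attr.value = (uint64_t)(unsigned long)&fd;
		attr.flags = BPF_ANY;
		sys_bpf(BPF_MAP_UPDATE_ELEM, &attr);
	}
	return outer_fd;
}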
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

struct bpf_map_def SEC("maps") hash_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = MAX_ENTRIES,
};

struct bpf_map_def SEC("maps") lru_hash_map = {
	.type = BPF_MAP_TYPE_LRU_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = 10000,
};

struct bpf_map_def SEC("maps") nocommon_lru_hash_map = {
	.type = BPF_MAP_TYPE_LRU_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = 10000,
	.map_flags = BPF_F_NO_COMMON_LRU,
};

struct bpf_map_def SEC("maps") inner_lru_hash_map = {
	.type = BPF_MAP_TYPE_LRU_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = MAX_ENTRIES,
};
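
/* Map-in-map: an array indexed by CPU id whose elements are LRU hash maps.
 * The slots are populated from user space (one inner LRU map per CPU that
 * will run the test); test case 2 in stress_lru_hmap_alloc() below updates
 * the per-CPU inner map.
 */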
struct bpf_map_def SEC("maps") array_of_lru_hashs = {
	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
	.key_size = sizeof(u32),
	.max_entries = MAX_NR_CPUS,
};

struct bpf_map_def SEC("maps") percpu_hash_map = {
	.type = BPF_MAP_TYPE_PERCPU_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = MAX_ENTRIES,
};

struct bpf_map_def SEC("maps") hash_map_alloc = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = MAX_ENTRIES,
	.map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
	.type = BPF_MAP_TYPE_PERCPU_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = MAX_ENTRIES,
	.map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
	.type = BPF_MAP_TYPE_LPM_TRIE,
	.key_size = 8,
	.value_size = sizeof(long),
	.max_entries = 10000,
	.map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def SEC("maps") array_map = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(long),
	.max_entries = MAX_ENTRIES,
};

SEC("kprobe/sys_getuid")
int stress_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value)
		bpf_map_delete_elem(&hash_map, &key);

	return 0;
}

SEC("kprobe/sys_geteuid")
int stress_percpu_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map, &key);
	return 0;
}

SEC("kprobe/sys_getgid")
int stress_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&hash_map_alloc, &key);
	return 0;
}

SEC("kprobe/sys_getegid")
int stress_percpu_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
	return 0;
}
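
/* LRU stress driven by connect(): the destination address encodes which
 * flavour to exercise. Addresses starting with dead:beef are test traffic;
 * the last 16 bits of the IPv6 destination select the common LRU (0), the
 * per-CPU BPF_F_NO_COMMON_LRU map (1) or the map-in-map per-CPU LRU (2).
 */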
SEC("kprobe/sys_connect")
int stress_lru_hmap_alloc(struct pt_regs *ctx)
{
	struct sockaddr_in6 *in6;
	u16 test_case, dst6[8];
	int addrlen, ret;
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
	long val = 1;
	u32 key = bpf_get_prandom_u32();

	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
	addrlen = (int)PT_REGS_PARM3(ctx);

	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr);
	if (ret)
		goto done;

	if (dst6[0] != 0xdead || dst6[1] != 0xbeef)
		return 0;

	test_case = dst6[7];

	if (test_case == 0) {
		ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
	} else if (test_case == 1) {
		ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 2) {
		void *nolocal_lru_map;
		int cpu = bpf_get_smp_processor_id();

		nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
						      &cpu);
		if (!nolocal_lru_map) {
			ret = -ENOENT;
			goto done;
		}

		ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
					  BPF_ANY);
	} else {
		ret = -EINVAL;
	}

done:
	if (ret)
		bpf_trace_printk(fmt, sizeof(fmt), ret);

	return 0;
}
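
/* LPM trie lookups: the 8-byte key is struct bpf_lpm_trie_key laid out by
 * hand, a 32-bit prefix length followed by the address bytes (192.168.0.1).
 */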
SEC("kprobe/sys_gettid")
int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
{
	union {
		u32 b32[2];
		u8 b8[8];
	} key;
	unsigned int i;

	key.b32[0] = 32;
	key.b8[4] = 192;
	key.b8[5] = 168;
	key.b8[6] = 0;
	key.b8[7] = 1;

#pragma clang loop unroll(full)
	for (i = 0; i < 32; ++i)
		bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

	return 0;
}
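
/* Lookup-only stress: hammer a single key with an unrolled loop of 64
 * lookups per syscall to compare hash and array lookup cost.
 */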
SEC("kprobe/sys_getpgid")
int stress_hash_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&hash_map, &key);

	return 0;
}

SEC("kprobe/sys_getpgrp")
int stress_array_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&array_map, &key);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;