2016-03-09 06:07:54 +07:00
|
|
|
/* Copyright (c) 2016 Facebook
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of version 2 of the GNU General Public
|
|
|
|
* License as published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/version.h>
|
|
|
|
#include <uapi/linux/bpf.h>
|
2020-01-20 20:06:49 +07:00
|
|
|
#include <bpf/bpf_helpers.h>
|
|
|
|
#include <bpf/bpf_tracing.h>
|
samples: bpf: Fix bpf programs with kprobe/sys_connect event
Currently, BPF programs with kprobe/sys_connect does not work properly.
Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
This commit modifies the bpf_load behavior of kprobe events in the x64
architecture. If the current kprobe event target starts with "sys_*",
add the prefix "__x64_" to the front of the event.
Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
solution to most of the problems caused by the commit below.
commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
pt_regs-based sys_*() to __x64_sys_*()")
However, there is a problem with the sys_connect kprobe event that does
not work properly. For __sys_connect event, parameters can be fetched
normally, but for __x64_sys_connect, parameters cannot be fetched.
ffffffff818d3520 <__x64_sys_connect>:
ffffffff818d3520: e8 fb df 32 00 callq 0xffffffff81c01520
<__fentry__>
ffffffff818d3525: 48 8b 57 60 movq 96(%rdi), %rdx
ffffffff818d3529: 48 8b 77 68 movq 104(%rdi), %rsi
ffffffff818d352d: 48 8b 7f 70 movq 112(%rdi), %rdi
ffffffff818d3531: e8 1a ff ff ff callq 0xffffffff818d3450
<__sys_connect>
ffffffff818d3536: 48 98 cltq
ffffffff818d3538: c3 retq
ffffffff818d3539: 0f 1f 80 00 00 00 00 nopl (%rax)
As the assembly code for __x64_sys_connect shows, parameters should be
fetched and set into rdi, rsi, rdx registers prior to calling
__sys_connect.
Because of this problem, this commit fixes the sys_connect event by
first getting the value of the rdi register and then the value of the
rdi, rsi, and rdx register through an offset based on that value.
Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200707184855.30968-2-danieltimlee@gmail.com
2020-07-08 01:48:52 +07:00
|
|
|
#include <bpf/bpf_core_read.h>
|
|
|
|
#include "trace_common.h"
|
2016-03-09 06:07:54 +07:00
|
|
|
|
|
|
|
#define MAX_ENTRIES 1000
|
2017-04-15 00:30:30 +07:00
|
|
|
#define MAX_NR_CPUS 1024
|
2016-03-09 06:07:54 +07:00
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Plain hash map exercised by stress_hmap() (update/lookup/delete cycle)
 * and stress_hash_map_lookup() (lookup-only loop).
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");
|
|
|
|
|
|
|
|
/* LRU hash map with the default common (shared) LRU list;
 * stressed by test_case 0 of stress_lru_hmap_alloc().
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");
|
|
|
|
|
|
|
|
/* LRU hash map with BPF_F_NO_COMMON_LRU (per-CPU LRU lists instead of one
 * shared list); stressed by test_case 1 of stress_lru_hmap_alloc().
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");
|
|
|
|
|
|
|
|
/* LRU hash map bound to NUMA node 0 (BPF_F_NUMA_NODE + numa_node).
 * The named struct tag also serves as the inner-map template for
 * array_of_lru_hashs below.
 */
struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");
|
|
|
|
|
|
|
|
/* Array-of-maps holding one inner LRU hash per slot; test_case 2 of
 * stress_lru_hmap_alloc() indexes it by the current CPU id.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
	/* statically initialize the first element */
	.values = { &inner_lru_hash_map },
};
|
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Per-CPU hash map exercised by stress_percpu_hmap(). */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");
|
|
|
|
|
|
|
|
/* Hash map with BPF_F_NO_PREALLOC: elements are allocated at update time
 * rather than preallocated, which is what stress_hmap_alloc() measures.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");
|
|
|
|
|
|
|
|
/* Per-CPU hash map variant with BPF_F_NO_PREALLOC (allocate on update);
 * exercised by stress_percpu_hmap_alloc().
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");
|
|
|
|
|
|
|
|
/* LPM trie for IPv4 lookups: key_size 8 = 4-byte prefix length followed by
 * 4 address bytes (see the key union in stress_lpm_trie_map_alloc()).
 */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");
|
|
|
|
|
|
|
|
/* Plain array map; lookup-only loop in stress_array_map_lookup(). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");
|
|
|
|
|
|
|
|
/* LRU hash map targeted by test_case 3 of stress_lru_hmap_alloc()
 * (32 consecutive-key lookups).
 */
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");
|
|
|
|
|
|
|
|
/* Attached at getuid() entry (SYSCALL() from trace_common.h resolves the
 * arch-specific symbol name): one update/lookup/delete cycle on hash_map
 * per syscall, keyed by the low 32 bits of bpf_get_current_pid_tgid().
 */
SEC("kprobe/" SYSCALL(sys_getuid))
int stress_hmap(struct pt_regs *ctx)
{
	long one = 1;
	u32 tid = bpf_get_current_pid_tgid();

	bpf_map_update_elem(&hash_map, &tid, &one, BPF_ANY);
	if (bpf_map_lookup_elem(&hash_map, &tid))
		bpf_map_delete_elem(&hash_map, &tid);

	return 0;
}
|
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Attached at geteuid() entry: same update/lookup/delete cycle as
 * stress_hmap(), but against the per-CPU hash map.
 */
SEC("kprobe/" SYSCALL(sys_geteuid))
int stress_percpu_hmap(struct pt_regs *ctx)
{
	long one = 1;
	u32 tid = bpf_get_current_pid_tgid();

	bpf_map_update_elem(&percpu_hash_map, &tid, &one, BPF_ANY);
	if (bpf_map_lookup_elem(&percpu_hash_map, &tid))
		bpf_map_delete_elem(&percpu_hash_map, &tid);

	return 0;
}
|
2017-04-15 00:30:27 +07:00
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Attached at getgid() entry: update/lookup/delete cycle against the
 * no-prealloc hash map, so each update pays the allocation cost.
 */
SEC("kprobe/" SYSCALL(sys_getgid))
int stress_hmap_alloc(struct pt_regs *ctx)
{
	long one = 1;
	u32 tid = bpf_get_current_pid_tgid();

	bpf_map_update_elem(&hash_map_alloc, &tid, &one, BPF_ANY);
	if (bpf_map_lookup_elem(&hash_map_alloc, &tid))
		bpf_map_delete_elem(&hash_map_alloc, &tid);

	return 0;
}
|
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Attached at getegid() entry: update/lookup/delete cycle against the
 * per-CPU, no-prealloc hash map.
 */
SEC("kprobe/" SYSCALL(sys_getegid))
int stress_percpu_hmap_alloc(struct pt_regs *ctx)
{
	long one = 1;
	u32 tid = bpf_get_current_pid_tgid();

	bpf_map_update_elem(&percpu_hash_map_alloc, &tid, &one, BPF_ANY);
	if (bpf_map_lookup_elem(&percpu_hash_map_alloc, &tid))
		bpf_map_delete_elem(&percpu_hash_map_alloc, &tid);

	return 0;
}
|
2016-11-12 01:55:11 +07:00
|
|
|
|
samples: bpf: Fix bpf programs with kprobe/sys_connect event
Currently, BPF programs with kprobe/sys_connect does not work properly.
Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
This commit modifies the bpf_load behavior of kprobe events in the x64
architecture. If the current kprobe event target starts with "sys_*",
add the prefix "__x64_" to the front of the event.
Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a
solution to most of the problems caused by the commit below.
commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct
pt_regs-based sys_*() to __x64_sys_*()")
However, there is a problem with the sys_connect kprobe event that does
not work properly. For __sys_connect event, parameters can be fetched
normally, but for __x64_sys_connect, parameters cannot be fetched.
ffffffff818d3520 <__x64_sys_connect>:
ffffffff818d3520: e8 fb df 32 00 callq 0xffffffff81c01520
<__fentry__>
ffffffff818d3525: 48 8b 57 60 movq 96(%rdi), %rdx
ffffffff818d3529: 48 8b 77 68 movq 104(%rdi), %rsi
ffffffff818d352d: 48 8b 7f 70 movq 112(%rdi), %rdi
ffffffff818d3531: e8 1a ff ff ff callq 0xffffffff818d3450
<__sys_connect>
ffffffff818d3536: 48 98 cltq
ffffffff818d3538: c3 retq
ffffffff818d3539: 0f 1f 80 00 00 00 00 nopl (%rax)
As the assembly code for __x64_sys_connect shows, parameters should be
fetched and set into rdi, rsi, rdx registers prior to calling
__sys_connect.
Because of this problem, this commit fixes the sys_connect event by
first getting the value of the rdi register and then the value of the
rdi, rsi, and rdx register through an offset based on that value.
Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64")
Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200707184855.30968-2-danieltimlee@gmail.com
2020-07-08 01:48:52 +07:00
|
|
|
/* Attached at the sys_connect entry wrapper. On x86-64 the wrapper receives
 * a single struct pt_regs * holding the user's registers, so the real
 * syscall arguments must be re-read from that inner pt_regs via the
 * CO-RE PT_REGS_PARM*_CORE() accessors.
 *
 * The userspace test encodes its parameters in the IPv6 destination
 * address of the connect() call: magic0/magic1 select "this is a test
 * packet", tcase picks which LRU map to stress, and key supplies the
 * starting key for test_case 3.
 *
 * Fix: the bpf_trace_printk() format string was missing the backslash in
 * its newline escape ("%dn" instead of "%d\n"), so failures printed a
 * literal 'n' with no line break in the trace pipe.
 */
SEC("kprobe/" SYSCALL(sys_connect))
int stress_lru_hmap_alloc(struct pt_regs *ctx)
{
	/* PARM1 of the wrapper is the pt_regs with the real user args. */
	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
	union {
		u16 dst6[8];
		struct {
			u16 magic0;
			u16 magic1;
			u16 tcase;
			u16 unused16;
			u32 unused32;
			u32 key;
		};
	} test_params;
	struct sockaddr_in6 *in6;
	u16 test_case;
	int addrlen, ret;
	long val = 1;
	u32 key = 0;

	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
	addrlen = (int)PT_REGS_PARM3_CORE(real_regs);

	/* Only IPv6 connect() calls can carry the encoded test params. */
	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
				  &in6->sin6_addr);
	if (ret)
		goto done;

	/* Ignore real traffic: only addresses tagged 0xdead/0xbeef count. */
	if (test_params.magic0 != 0xdead ||
	    test_params.magic1 != 0xbeef)
		return 0;

	test_case = test_params.tcase;
	/* test_case 3 uses the caller-supplied key; others use a random one. */
	if (test_case != 3)
		key = bpf_get_prandom_u32();

	if (test_case == 0) {
		ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
	} else if (test_case == 1) {
		ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 2) {
		void *nolocal_lru_map;
		int cpu = bpf_get_smp_processor_id();

		/* Pick this CPU's inner LRU map out of the map-in-map. */
		nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
						      &cpu);
		if (!nolocal_lru_map) {
			ret = -ENOENT;
			goto done;
		}

		ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 3) {
		u32 i;

		key = test_params.key;

		/* 32 consecutive-key lookups; ret stays 0 from the
		 * successful bpf_probe_read_user() above.
		 */
#pragma clang loop unroll(full)
		for (i = 0; i < 32; i++) {
			bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
			key++;
		}
	} else {
		ret = -EINVAL;
	}

done:
	if (ret)
		bpf_trace_printk(fmt, sizeof(fmt), ret);

	return 0;
}
|
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Attached at gettid() entry: performs 32 unrolled lookups of 192.168.0.1
 * against the /32 prefix in lpm_trie_map_alloc.
 */
SEC("kprobe/" SYSCALL(sys_gettid))
int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
{
	/* LPM trie key layout (matches the map's key_size of 8): first
	 * 4 bytes are the prefix length, the next 4 the IPv4 address.
	 */
	union {
		u32 b32[2];
		u8 b8[8];
	} key;
	unsigned int i;

	key.b32[0] = 32;	/* prefix length: match the full address */
	key.b8[4] = 192;	/* 192.168.0.1, one byte at a time */
	key.b8[5] = 168;
	key.b8[6] = 0;
	key.b8[7] = 1;

#pragma clang loop unroll(full)
	for (i = 0; i < 32; ++i)
		bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

	return 0;
}
|
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Attached at getpgid() entry: 64 unrolled lookups of key 1 in hash_map,
 * measuring pure lookup throughput (the result is intentionally unused).
 */
SEC("kprobe/" SYSCALL(sys_getpgid))
int stress_hash_map_lookup(struct pt_regs *ctx)
{
	long *slot;
	u32 idx = 1;
	u32 n;

#pragma clang loop unroll(full)
	for (n = 0; n < 64; ++n)
		slot = bpf_map_lookup_elem(&hash_map, &idx);

	return 0;
}
|
|
|
|
|
2020-07-08 01:48:54 +07:00
|
|
|
/* Attached at getppid() entry: 64 unrolled lookups of index 1 in array_map,
 * measuring pure lookup throughput (the result is intentionally unused).
 */
SEC("kprobe/" SYSCALL(sys_getppid))
int stress_array_map_lookup(struct pt_regs *ctx)
{
	long *slot;
	u32 idx = 1;
	u32 n;

#pragma clang loop unroll(full)
	for (n = 0; n < 64; ++n)
		slot = bpf_map_lookup_elem(&array_map, &idx);

	return 0;
}
|
|
|
|
|
2016-03-09 06:07:54 +07:00
|
|
|
/* GPL-compatible license is required for the GPL-only helpers used above
 * (e.g. bpf_trace_printk).
 */
char _license[] SEC("license") = "GPL";
/* NOTE(review): legacy "version" section — presumably kept for older
 * loaders that require it for kprobe programs; confirm before removing.
 */
u32 _version SEC("version") = LINUX_VERSION_CODE;
|