Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2025-01-18 06:46:35 +07:00
Merge branch 'bpf-misc-test-fixes'
Stanislav Fomichev says:

====================
* add test__skip to indicate skipped tests
* remove global success/error counts (use environment)
* remove asserts from the tests
* remove unused ret from send_signal test

v3:
* QCHECK -> CHECK_FAIL (Daniel Borkmann)

v2:
* drop patch that changes output to keep consistent with test_verifier (Alexei Starovoitov)
* QCHECK instead of test__fail (Andrii Nakryiko)
* test__skip count number of subtests (Andrii Nakryiko)
====================

Cc: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
This commit is contained in: commit 7bc7d83870
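Every per-test change in the diff below follows the same pattern: open-coded assert() calls and bumps of the global error_cnt are replaced by the CHECK_FAIL() macro, which records the failure against the current test and lets the function unwind through its normal cleanup path instead of aborting the whole binary. A minimal before/after sketch of that pattern (the map name and the cleanup label are illustrative, not taken from any one file in the diff):

    /* old style: abort the process on failure and bump a global counter */
    map_fd = bpf_find_map(__func__, obj, "example_map");
    if (map_fd < 0)
        error_cnt++;
    assert(map_fd >= 0);

    /* new style: mark only this test as failed and branch to cleanup */
    map_fd = bpf_find_map(__func__, obj, "example_map");
    if (CHECK_FAIL(map_fd < 0))
        goto cleanup;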
@@ -48,16 +48,17 @@ void test_bpf_obj_id(void)
/* test_obj_id.o is a dumb prog. It should never fail
* to load.
*/
if (err)
error_cnt++;
assert(!err);
if (CHECK_FAIL(err))
continue;

/* Insert a magic value to the map */
map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
assert(map_fds[i] >= 0);
if (CHECK_FAIL(map_fds[i] < 0))
goto done;
err = bpf_map_update_elem(map_fds[i], &array_key,
&array_magic_value, 0);
assert(!err);
if (CHECK_FAIL(err))
goto done;

/* Check getting map info */
info_len = sizeof(struct bpf_map_info) * 2;
@@ -96,9 +97,11 @@ void test_bpf_obj_id(void)
prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
prog_infos[i].nr_map_ids = 2;
err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
assert(!err);
if (CHECK_FAIL(err))
goto done;
err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
assert(!err);
if (CHECK_FAIL(err))
goto done;
err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
&info_len);
load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
@@ -224,7 +227,8 @@ void test_bpf_obj_id(void)
nr_id_found++;

err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
assert(!err);
if (CHECK_FAIL(err))
goto done;

err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
CHECK(err || info_len != sizeof(struct bpf_map_info) ||

@@ -28,8 +28,6 @@ static int check_load(const char *file, enum bpf_prog_type type)
attr.prog_flags = BPF_F_TEST_RND_HI32;
err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
bpf_object__close(obj);
if (err)
error_cnt++;
return err;
}

@@ -105,12 +103,7 @@ void test_bpf_verif_scale(void)
continue;

err = check_load(test->file, test->attach_type);
if (test->fails) { /* expected to fail */
if (err)
error_cnt--;
else
error_cnt++;
}
CHECK_FAIL(err && !test->fails);
}

if (env.verifier_stats)

@@ -452,10 +452,8 @@ void test_flow_dissector(void)

err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
"jmp_table", "last_dissection", &prog_fd, &keys_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_flow_keys flow_keys;

@@ -135,10 +135,7 @@ void test_get_stack_raw_tp(void)
exp_cnt -= err;
}

goto close_prog_noerr;
close_prog:
error_cnt++;
close_prog_noerr:
if (!IS_ERR_OR_NULL(link))
bpf_link__destroy(link);
if (!IS_ERR_OR_NULL(pb))

@@ -7,10 +7,8 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
uint64_t num;

map_fd = bpf_find_map(__func__, obj, "result_number");
if (map_fd < 0) {
error_cnt++;
if (CHECK_FAIL(map_fd < 0))
return;
}

struct {
char *name;
@@ -44,10 +42,8 @@ static void test_global_data_string(struct bpf_object *obj, __u32 duration)
char str[32];

map_fd = bpf_find_map(__func__, obj, "result_string");
if (map_fd < 0) {
error_cnt++;
if (CHECK_FAIL(map_fd < 0))
return;
}

struct {
char *name;
@@ -81,10 +77,8 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
struct foo val;

map_fd = bpf_find_map(__func__, obj, "result_struct");
if (map_fd < 0) {
error_cnt++;
if (CHECK_FAIL(map_fd < 0))
return;
}

struct {
char *name;
@@ -112,16 +106,12 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
__u8 *buff;

map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
if (!map || !bpf_map__is_internal(map)) {
error_cnt++;
if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
return;
}

map_fd = bpf_map__fd(map);
if (map_fd < 0) {
error_cnt++;
if (CHECK_FAIL(map_fd < 0))
return;
}

buff = malloc(bpf_map__def(map)->value_size);
if (buff)

@@ -30,10 +30,8 @@ static void test_l4lb(const char *file)
u32 *magic = (u32 *)buf;

err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

map_fd = bpf_find_map(__func__, obj, "vip_map");
if (map_fd < 0)
@@ -72,10 +70,9 @@ static void test_l4lb(const char *file)
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
error_cnt++;
if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 ||
pkts != NUM_ITER * 2))
printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
}
out:
bpf_object__close(obj);
}

@@ -8,14 +8,12 @@ static void *parallel_map_access(void *arg)

for (i = 0; i < 10000; i++) {
err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
if (err) {
if (CHECK_FAIL(err)) {
printf("lookup failed\n");
error_cnt++;
goto out;
}
if (vars[0] != 0) {
if (CHECK_FAIL(vars[0] != 0)) {
printf("lookup #%d var[0]=%d\n", i, vars[0]);
error_cnt++;
goto out;
}
rnd = vars[1];
@@ -24,7 +22,7 @@ static void *parallel_map_access(void *arg)
continue;
printf("lookup #%d var[1]=%d var[%d]=%d\n",
i, rnd, j, vars[j]);
error_cnt++;
CHECK_FAIL(vars[j] != rnd);
goto out;
}
}
@@ -42,34 +40,36 @@ void test_map_lock(void)
void *ret;

err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (err) {
if (CHECK_FAIL(err)) {
printf("test_map_lock:bpf_prog_load errno %d\n", errno);
goto close_prog;
}
map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
if (map_fd[0] < 0)
if (CHECK_FAIL(map_fd[0] < 0))
goto close_prog;
map_fd[1] = bpf_find_map(__func__, obj, "array_map");
if (map_fd[1] < 0)
if (CHECK_FAIL(map_fd[1] < 0))
goto close_prog;

bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);

for (i = 0; i < 4; i++)
assert(pthread_create(&thread_id[i], NULL,
&spin_lock_thread, &prog_fd) == 0);
if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,
&spin_lock_thread, &prog_fd)))
goto close_prog;
for (i = 4; i < 6; i++)
assert(pthread_create(&thread_id[i], NULL,
&parallel_map_access, &map_fd[i - 4]) == 0);
if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,
&parallel_map_access,
&map_fd[i - 4])))
goto close_prog;
for (i = 0; i < 4; i++)
assert(pthread_join(thread_id[i], &ret) == 0 &&
ret == (void *)&prog_fd);
if (CHECK_FAIL(pthread_join(thread_id[i], &ret) ||
ret != (void *)&prog_fd))
goto close_prog;
for (i = 4; i < 6; i++)
assert(pthread_join(thread_id[i], &ret) == 0 &&
ret == (void *)&map_fd[i - 4]);
goto close_prog_noerr;
if (CHECK_FAIL(pthread_join(thread_id[i], &ret) ||
ret != (void *)&map_fd[i - 4]))
goto close_prog;
close_prog:
error_cnt++;
close_prog_noerr:
bpf_object__close(obj);
}

@@ -9,10 +9,8 @@ void test_pkt_access(void)
int err, prog_fd;

err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, &retval, &duration);

@@ -9,10 +9,8 @@ void test_pkt_md_access(void)
int err, prog_fd;

err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, &retval, &duration);

@@ -27,10 +27,8 @@ static void test_queue_stack_map_by_type(int type)
return;

err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

map_in_fd = bpf_find_map(__func__, obj, "map_in");
if (map_in_fd < 0)
@@ -43,10 +41,8 @@ static void test_queue_stack_map_by_type(int type)
/* Push 32 elements to the input map */
for (i = 0; i < MAP_SIZE; i++) {
err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
goto out;
}
}

/* The eBPF program pushes iph.saddr in the output map,

@@ -10,10 +10,8 @@ void test_reference_tracking(void)
int err = 0;

obj = bpf_object__open(file);
if (IS_ERR(obj)) {
error_cnt++;
if (CHECK_FAIL(IS_ERR(obj)))
return;
}

bpf_object__for_each_program(prog, obj) {
const char *title;
@@ -8,7 +8,7 @@ static void sigusr1_handler(int signum)
sigusr1_received++;
}

static int test_send_signal_common(struct perf_event_attr *attr,
static void test_send_signal_common(struct perf_event_attr *attr,
int prog_type,
const char *test_name)
{
@@ -23,13 +23,13 @@ static int test_send_signal_common(struct perf_event_attr *attr,

if (CHECK(pipe(pipe_c2p), test_name,
"pipe pipe_c2p error: %s\n", strerror(errno)))
goto no_fork_done;
return;

if (CHECK(pipe(pipe_p2c), test_name,
"pipe pipe_p2c error: %s\n", strerror(errno))) {
close(pipe_c2p[0]);
close(pipe_c2p[1]);
goto no_fork_done;
return;
}

pid = fork();
@@ -38,7 +38,7 @@ static int test_send_signal_common(struct perf_event_attr *attr,
close(pipe_c2p[1]);
close(pipe_p2c[0]);
close(pipe_p2c[1]);
goto no_fork_done;
return;
}

if (pid == 0) {
@@ -125,7 +125,7 @@ static int test_send_signal_common(struct perf_event_attr *attr,
goto disable_pmu;
}

err = CHECK(buf[0] != '2', test_name, "incorrect result\n");
CHECK(buf[0] != '2', test_name, "incorrect result\n");

/* notify child safe to exit */
write(pipe_p2c[1], buf, 1);
@@ -138,11 +138,9 @@ static int test_send_signal_common(struct perf_event_attr *attr,
close(pipe_c2p[0]);
close(pipe_p2c[1]);
wait(NULL);
no_fork_done:
return err;
}

static int test_send_signal_tracepoint(void)
static void test_send_signal_tracepoint(void)
{
const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id";
struct perf_event_attr attr = {
@@ -159,21 +157,21 @@ static int test_send_signal_tracepoint(void)
if (CHECK(efd < 0, "tracepoint",
"open syscalls/sys_enter_nanosleep/id failure: %s\n",
strerror(errno)))
return -1;
return;

bytes = read(efd, buf, sizeof(buf));
close(efd);
if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint",
"read syscalls/sys_enter_nanosleep/id failure: %s\n",
strerror(errno)))
return -1;
return;

attr.config = strtol(buf, NULL, 0);

return test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint");
test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint");
}

static int test_send_signal_perf(void)
static void test_send_signal_perf(void)
{
struct perf_event_attr attr = {
.sample_period = 1,
@@ -181,11 +179,11 @@ static int test_send_signal_perf(void)
.config = PERF_COUNT_SW_CPU_CLOCK,
};

return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
"perf_sw_event");
test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
"perf_sw_event");
}

static int test_send_signal_nmi(void)
static void test_send_signal_nmi(void)
{
struct perf_event_attr attr = {
.sample_freq = 50,
@@ -204,25 +202,24 @@ static int test_send_signal_nmi(void)
if (errno == ENOENT) {
printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n",
__func__);
return 0;
test__skip();
return;
}
/* Let the test fail with a more informative message */
} else {
close(pmu_fd);
}

return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
"perf_hw_event");
test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
"perf_hw_event");
}

void test_send_signal(void)
{
int ret = 0;

if (test__start_subtest("send_signal_tracepoint"))
ret |= test_send_signal_tracepoint();
test_send_signal_tracepoint();
if (test__start_subtest("send_signal_perf"))
ret |= test_send_signal_perf();
test_send_signal_perf();
if (test__start_subtest("send_signal_nmi"))
ret |= test_send_signal_nmi();
test_send_signal_nmi();
}
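With the send_signal helpers above now returning void, a missing prerequisite is reported through test__skip() rather than a special return value, and the top-level test only registers subtests instead of aggregating return codes. A condensed sketch of that shape, assuming test_progs.h provides the helpers and using a hypothetical feature probe in place of the perf_event_open() check above:

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include "test_progs.h"  /* test__skip(), test__start_subtest(), CHECK_FAIL() */

    static void test_example_subtest(void)
    {
        /* hypothetical prerequisite; stands in for the perf_event_open() probe above */
        int fd = open("/sys/kernel/hypothetical_feature", O_RDONLY);

        if (fd < 0 && errno == ENOENT) {
            test__skip();  /* recorded as SKIPPED, not FAILED */
            return;
        }
        if (CHECK_FAIL(fd < 0))
            return;
        /* ... the actual checks would run here ... */
        close(fd);
    }

    void test_example(void)
    {
        if (test__start_subtest("example_subtest"))
            test_example_subtest();
    }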
@@ -11,19 +11,19 @@ void test_spinlock(void)
void *ret;

err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
if (err) {
if (CHECK_FAIL(err)) {
printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
goto close_prog;
}
for (i = 0; i < 4; i++)
assert(pthread_create(&thread_id[i], NULL,
&spin_lock_thread, &prog_fd) == 0);
if (CHECK_FAIL(pthread_create(&thread_id[i], NULL,
&spin_lock_thread, &prog_fd)))
goto close_prog;

for (i = 0; i < 4; i++)
assert(pthread_join(thread_id[i], &ret) == 0 &&
ret == (void *)&prog_fd);
goto close_prog_noerr;
if (CHECK_FAIL(pthread_join(thread_id[i], &ret) ||
ret != (void *)&prog_fd))
goto close_prog;
close_prog:
error_cnt++;
close_prog_noerr:
bpf_object__close(obj);
}

@@ -51,9 +51,10 @@ void test_stacktrace_build_id(void)
"err %d errno %d\n", err, errno))
goto disable_pmu;

assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
== 0);
assert(system("./urandom_read") == 0);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
goto disable_pmu;
if (CHECK_FAIL(system("./urandom_read")))
goto disable_pmu;
/* disable stack trace collection */
key = 0;
val = 1;

@@ -82,9 +82,10 @@ void test_stacktrace_build_id_nmi(void)
"err %d errno %d\n", err, errno))
goto disable_pmu;

assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
== 0);
assert(system("taskset 0x1 ./urandom_read 100000") == 0);
if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
goto disable_pmu;
if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
goto disable_pmu;
/* disable stack trace collection */
key = 0;
val = 1;

@@ -26,19 +26,19 @@ void test_stacktrace_map(void)

/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (control_map_fd < 0)
if (CHECK_FAIL(control_map_fd < 0))
goto disable_pmu;

stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (stackid_hmap_fd < 0)
if (CHECK_FAIL(stackid_hmap_fd < 0))
goto disable_pmu;

stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (stackmap_fd < 0)
if (CHECK_FAIL(stackmap_fd < 0))
goto disable_pmu;

stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
if (stack_amap_fd < 0)
if (CHECK_FAIL(stack_amap_fd < 0))
goto disable_pmu;

/* give some time for bpf program run */
@@ -55,23 +55,20 @@ void test_stacktrace_map(void)
err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
"err %d errno %d\n", err, errno))
goto disable_pmu_noerr;
goto disable_pmu;

err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
"err %d errno %d\n", err, errno))
goto disable_pmu_noerr;
goto disable_pmu;

stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
"err %d errno %d\n", err, errno))
goto disable_pmu_noerr;
goto disable_pmu;

goto disable_pmu_noerr;
disable_pmu:
error_cnt++;
disable_pmu_noerr:
bpf_link__destroy(link);
close_prog:
bpf_object__close(obj);

@@ -26,15 +26,15 @@ void test_stacktrace_map_raw_tp(void)

/* find map fds */
control_map_fd = bpf_find_map(__func__, obj, "control_map");
if (control_map_fd < 0)
if (CHECK_FAIL(control_map_fd < 0))
goto close_prog;

stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
if (stackid_hmap_fd < 0)
if (CHECK_FAIL(stackid_hmap_fd < 0))
goto close_prog;

stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
if (stackmap_fd < 0)
if (CHECK_FAIL(stackmap_fd < 0))
goto close_prog;

/* give some time for bpf program run */
@@ -58,10 +58,7 @@ void test_stacktrace_map_raw_tp(void)
"err %d errno %d\n", err, errno))
goto close_prog;

goto close_prog_noerr;
close_prog:
error_cnt++;
close_prog_noerr:
if (!IS_ERR_OR_NULL(link))
bpf_link__destroy(link);
bpf_object__close(obj);

@@ -70,9 +70,6 @@ void test_task_fd_query_rawtp(void)
if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
goto close_prog;

goto close_prog_noerr;
close_prog:
error_cnt++;
close_prog_noerr:
bpf_object__close(obj);
}

@@ -62,14 +62,9 @@ static void test_task_fd_query_tp_core(const char *probe_name,
fd_type, buf))
goto close_pmu;

close(pmu_fd);
goto close_prog_noerr;

close_pmu:
close(pmu_fd);
close_prog:
error_cnt++;
close_prog_noerr:
bpf_object__close(obj);
}

@@ -10,10 +10,8 @@ void test_tcp_estats(void)

err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
CHECK(err, "", "err %d errno %d\n", err, errno);
if (err) {
error_cnt++;
if (err)
return;
}

bpf_object__close(obj);
}

@@ -16,10 +16,8 @@ void test_xdp(void)
int err, prog_fd, map_fd;

err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

map_fd = bpf_find_map(__func__, obj, "vip2tnl");
if (map_fd < 0)

@@ -10,10 +10,8 @@ void test_xdp_adjust_tail(void)
int err, prog_fd;

err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);

@@ -31,10 +31,8 @@ void test_xdp_noinline(void)
u32 *magic = (u32 *)buf;

err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (err) {
error_cnt++;
if (CHECK_FAIL(err))
return;
}

map_fd = bpf_find_map(__func__, obj, "vip_map");
if (map_fd < 0)
@@ -73,8 +71,8 @@ void test_xdp_noinline(void)
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
error_cnt++;
if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 ||
pkts != NUM_ITER * 2)) {
printf("test_xdp_noinline:FAIL:stats %lld %lld\n",
bytes, pkts);
}
@@ -8,22 +8,20 @@

/* defined in test_progs.h */
struct test_env env;
int error_cnt, pass_cnt;

struct prog_test_def {
const char *test_name;
int test_num;
void (*run_test)(void);
bool force_log;
int pass_cnt;
int error_cnt;
int skip_cnt;
bool tested;

const char *subtest_name;
int subtest_num;

/* store counts before subtest started */
int old_pass_cnt;
int old_error_cnt;
};

@@ -56,15 +54,24 @@ static void dump_test_log(const struct prog_test_def *test, bool failed)
fseeko(stdout, 0, SEEK_SET); /* rewind */
}

static void skip_account(void)
{
if (env.test->skip_cnt) {
env.skip_cnt++;
env.test->skip_cnt = 0;
}
}

void test__end_subtest()
{
struct prog_test_def *test = env.test;
int sub_error_cnt = error_cnt - test->old_error_cnt;
int sub_error_cnt = test->error_cnt - test->old_error_cnt;

if (sub_error_cnt)
env.fail_cnt++;
else
env.sub_succ_cnt++;
skip_account();

dump_test_log(test, sub_error_cnt);

@@ -95,8 +102,7 @@ bool test__start_subtest(const char *name)
return false;

test->subtest_name = name;
env.test->old_pass_cnt = pass_cnt;
env.test->old_error_cnt = error_cnt;
env.test->old_error_cnt = env.test->error_cnt;

return true;
}
@@ -105,6 +111,16 @@ void test__force_log() {
env.test->force_log = true;
}

void test__skip(void)
{
env.test->skip_cnt++;
}

void test__fail(void)
{
env.test->error_cnt++;
}

struct ipv4_packet pkt_v4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
@@ -129,7 +145,7 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
map = bpf_object__find_map_by_name(obj, name);
if (!map) {
printf("%s:FAIL:map '%s' not found\n", test, name);
error_cnt++;
test__fail();
return -1;
}
return bpf_map__fd(map);
@@ -488,8 +504,6 @@ int main(int argc, char **argv)
stdio_hijack();
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
int old_pass_cnt = pass_cnt;
int old_error_cnt = error_cnt;

env.test = test;
test->test_num = i + 1;
@@ -504,12 +518,11 @@ int main(int argc, char **argv)
test__end_subtest();

test->tested = true;
test->pass_cnt = pass_cnt - old_pass_cnt;
test->error_cnt = error_cnt - old_error_cnt;
if (test->error_cnt)
env.fail_cnt++;
else
env.succ_cnt++;
skip_account();

dump_test_log(test, test->error_cnt);

@@ -518,11 +531,11 @@ int main(int argc, char **argv)
test->error_cnt ? "FAIL" : "OK");
}
stdio_restore();
printf("Summary: %d/%d PASSED, %d FAILED\n",
env.succ_cnt, env.sub_succ_cnt, env.fail_cnt);
printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);

free(env.test_selector.num_set);
free(env.subtest_selector.num_set);

return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}
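In the framework changes above, test__skip() only bumps the current test's skip_cnt; skip_account() later folds any non-zero value into env.skip_cnt exactly once and resets it, so a (sub)test that hits several skip conditions still appears a single time in the SKIPPED column of the summary. A small illustration of that behaviour (the two capability probes are hypothetical stubs, hard-wired to fail for the sake of the example):

    #include <stdbool.h>
    #include "test_progs.h"  /* test__skip() */

    /* hypothetical capability probes, not part of the selftests */
    static bool feature_a_available(void) { return false; }
    static bool feature_b_available(void) { return false; }

    void test_example_skips(void)
    {
        if (!feature_a_available())
            test__skip();  /* env.test->skip_cnt == 1 */
        if (!feature_b_available())
            test__skip();  /* env.test->skip_cnt == 2 */
        /* skip_account() in the harness turns this into one increment of
         * env.skip_cnt, so the summary reports a single skip for this test. */
    }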
@@ -38,8 +38,6 @@ typedef __u16 __sum16;
#include "trace_helpers.h"
#include "flow_dissector_load.h"

struct prog_test_def;

struct test_selector {
const char *name;
bool *num_set;
@@ -64,14 +62,15 @@ struct test_env {
int succ_cnt; /* successful tests */
int sub_succ_cnt; /* successful sub-tests */
int fail_cnt; /* total failed tests + sub-tests */
int skip_cnt; /* skipped tests */
};

extern int error_cnt;
extern int pass_cnt;
extern struct test_env env;

extern void test__force_log();
extern bool test__start_subtest(const char *name);
extern void test__skip(void);
extern void test__fail(void);

#define MAGIC_BYTES 123

@@ -94,17 +93,25 @@ extern struct ipv6_packet pkt_v6;
#define _CHECK(condition, tag, duration, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
error_cnt++; \
test__fail(); \
printf("%s:FAIL:%s ", __func__, tag); \
printf(format); \
} else { \
pass_cnt++; \
printf("%s:PASS:%s %d nsec\n", \
__func__, tag, duration); \
} \
__ret; \
})

#define CHECK_FAIL(condition) ({ \
int __ret = !!(condition); \
if (__ret) { \
test__fail(); \
printf("%s:FAIL:%d ", __func__, __LINE__); \
} \
__ret; \
})

#define CHECK(condition, tag, format...) \
_CHECK(condition, tag, duration, format)
#define CHECK_ATTR(condition, tag, format...) \
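Both macros evaluate their condition once, call test__fail() when it is true, and hand the normalized condition back so the caller can branch on it. CHECK() additionally expects a duration variable in scope and prints a tagged PASS/FAIL line with a formatted message, while CHECK_FAIL() only prints the function name and line number on failure. A short usage sketch, assuming prog_fd and pkt_v4 are set up as in the tests above and that a cleanup label exists in the enclosing function:

    __u32 duration = 0, retval;
    int err;

    err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
                            NULL, NULL, &retval, &duration);

    /* verbose form: named tag plus formatted diagnostics */
    if (CHECK(err || retval != 0, "prog_run", "err %d retval %d\n", err, retval))
        goto cleanup;

    /* terse form: just mark the test as failed and branch */
    if (CHECK_FAIL(retval != 0))
        goto cleanup;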