tools: bpftool: partition basic-block for each function in the CFG

This patch partitions basic blocks for each function in the CFG. The
algorithm is simple: we identify basic-block heads in a first traversal,
then a second traversal identifies the tails.

We could build extended basic blocks (EBBs) as a next step. EBBs could make
the graph more readable when the eBPF sequence is large.

Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Jiong Wang 2018-03-01 18:01:19 -08:00 committed by Alexei Starovoitov
parent 80331752e9
commit 0824611f9b

View File

@@ -49,17 +49,32 @@ struct cfg {
/* One function of the program: a contiguous instruction range
 * [start, end] (end inclusive) plus the basic blocks it has been
 * partitioned into.
 */
struct func_node {
	struct list_head l;	/* linkage on cfg->funcs */
	struct list_head bbs;	/* basic blocks of this function, sorted by head address */
	struct bpf_insn *start;	/* first instruction of the function */
	struct bpf_insn *end;	/* last instruction of the function (inclusive) */
	int idx;		/* function index within the CFG */
	int bb_num;		/* number of basic blocks, set by func_partition_bb_tail() */
};
/* One basic block: the instruction range [head, tail] (tail inclusive).
 * tail is filled in by a second pass once all heads are known.
 */
struct bb_node {
	struct list_head l;	/* linkage on func->bbs, kept sorted by head */
	struct bpf_insn *head;	/* first instruction of the block */
	struct bpf_insn *tail;	/* last instruction of the block (inclusive) */
	int idx;		/* block index within its function */
};
/* Navigation helpers over the intrusive lists: functions are chained on
 * cfg->funcs, basic blocks on func->bbs, both through member 'l'.
 * Note the prev/next/first/last helpers do not check for the list head
 * sentinel; callers must know the entry they land on is valid.
 */
#define func_prev(func)		list_prev_entry(func, l)
#define func_next(func)		list_next_entry(func, l)
#define bb_prev(bb)		list_prev_entry(bb, l)
#define bb_next(bb)		list_next_entry(bb, l)
#define cfg_first_func(cfg)	\
	list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)	\
	list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func)	\
	list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)	\
	list_last_entry(&func->bbs, struct bb_node, l)
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
@@ -86,6 +101,30 @@ static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
return new_func;
}
/* Insert a basic block starting at @insn into @func's sorted bb list,
 * or return the existing block if one already starts there.
 *
 * Return: the (new or existing) bb_node, or NULL on allocation failure.
 */
static struct bb_node *func_append_bb(struct func_node *func,
				      struct bpf_insn *insn)
{
	struct bb_node *new_bb, *bb;

	/* Walk the address-sorted list until we find an existing block with
	 * this head, or overshoot the insertion point.
	 */
	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
		else if (bb->head > insn)
			break;
	}

	/* 'bb' is now either the first entry past @insn, or (if the loop ran
	 * to completion) the list head cast as an entry.  Stepping back one
	 * gives the predecessor in both cases, so list_add() below inserts
	 * the new block in sorted position.
	 */
	bb = bb_prev(bb);
	new_bb = calloc(1, sizeof(*new_bb));
	if (!new_bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}
	new_bb->head = insn;
	list_add(&new_bb->l, &bb->l);

	return new_bb;
}
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
struct bpf_insn *end)
{
@@ -115,13 +154,83 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
return false;
}
/* First pass of basic-block partitioning: record every block head in
 * @func.  The function entry always starts a block; each jump creates a
 * head at its target, and conditional jumps additionally create one at
 * the fall-through instruction.
 *
 * Return: true on allocation failure, false on success.
 */
static bool func_partition_bb_head(struct func_node *func)
{
	struct bpf_insn *insn = func->start;
	struct bpf_insn *last = func->end;
	struct bb_node *bb;

	INIT_LIST_HEAD(&func->bbs);
	bb = func_append_bb(func, insn);
	if (!bb)
		return true;

	for (; insn <= last; insn++) {
		u8 op;

		if (BPF_CLASS(insn->code) != BPF_JMP)
			continue;

		/* EXIT and CALL do not split the block here. */
		op = BPF_OP(insn->code);
		if (op == BPF_EXIT || op == BPF_CALL)
			continue;

		/* The jump target starts a new block. */
		bb = func_append_bb(func, insn + insn->off + 1);
		if (!bb)
			return true;

		/* For conditional jumps, the fall-through instruction
		 * starts a new block as well.
		 */
		if (op != BPF_JA) {
			bb = func_append_bb(func, insn + 1);
			if (!bb)
				return true;
		}
	}

	return false;
}
/* Second pass of basic-block partitioning: with all heads recorded in
 * sorted order, each block's tail is the instruction just before the next
 * block's head; the last block ends at the function's end.  Also assigns
 * sequential block indices and records the block count in func->bb_num.
 */
static void func_partition_bb_tail(struct func_node *func)
{
	unsigned int bb_idx = 0;
	struct bb_node *bb, *last;

	last = func_last_bb(func);
	last->tail = func->end;
	bb = func_first_bb(func);
	/* Iterates from the first block up to, but not including, 'last'. */
	list_for_each_entry_from(bb, &last->l, l) {
		bb->tail = bb_next(bb)->head - 1;
		bb->idx = bb_idx++;
	}

	last->idx = bb_idx++;
	func->bb_num = bb_idx;
}
/* Partition @func into basic blocks: first pass collects the block
 * heads, second pass derives the tails and indices.
 *
 * Return: true on error (allocation failure), false on success.
 */
static bool func_partition_bb(struct func_node *func)
{
	bool err = func_partition_bb_head(func);

	if (!err)
		func_partition_bb_tail(func);

	return err;
}
/* Build the CFG for @insn[0..len/sizeof(*insn)): split the instruction
 * stream into functions, then partition each function into basic blocks.
 *
 * Return: true on error, false on success.
 *
 * Note: the diff rendering had left the pre-patch unconditional
 * "return cfg_partition_funcs(...)" line in place, which made the
 * per-function partitioning below unreachable; it is removed here.
 */
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
	int cnt = len / sizeof(*insn);
	struct func_node *func;

	INIT_LIST_HEAD(&cfg->funcs);

	if (cfg_partition_funcs(cfg, insn, insn + cnt))
		return true;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func_partition_bb(func))
			return true;
	}

	return false;
}
static void cfg_destroy(struct cfg *cfg)
@@ -129,6 +238,13 @@ static void cfg_destroy(struct cfg *cfg)
struct func_node *func, *func2;
list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
struct bb_node *bb, *bb2;
list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
list_del(&bb->l);
free(bb);
}
list_del(&func->l);
free(func);
}