2016-09-21 17:43:57 +07:00
|
|
|
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of version 2 of the GNU General Public
|
|
|
|
* License as published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
#ifndef _LINUX_BPF_VERIFIER_H
|
|
|
|
#define _LINUX_BPF_VERIFIER_H 1
|
|
|
|
|
|
|
|
#include <linux/bpf.h> /* for enum bpf_reg_type */
|
|
|
|
#include <linux/filter.h> /* for MAX_BPF_STACK */
|
|
|
|
|
/* Just some arbitrary values so we can safely do math without overflowing and
 * are obviously wrong for any sort of memory access.
 */
#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
/* Parenthesized so the negative constant expands safely in any expression
 * context (unparenthesized negative macro bodies can change meaning when
 * adjacent to unary operators; see CERT PRE01-C).
 */
#define BPF_REGISTER_MIN_RANGE (-1)
/* Per-register verifier state: what kind of value the register holds and
 * the bounds/alignment the verifier has proven about it.
 */
struct bpf_reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
		s64 imm;

		/* valid when type == PTR_TO_PACKET* */
		struct {
			u16 off;
			u16 range;
		};

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
	/* NOTE(review): presumably a unique id assigned via id_gen in
	 * bpf_verifier_env — confirm against the verifier core.
	 */
	u32 id;
	/* Used to determine if any memory access using this register will
	 * result in a bad access. These two fields must be last.
	 * See states_equal()
	 */
	s64 min_value;
	u64 max_value;
	/* Known minimum alignment of the value in this register; e.g. a
	 * value computed as "n * 4" has min_align == 4.  Propagated into
	 * pointer registers so non-constant offsets can still pass
	 * alignment checks on strict-alignment architectures.
	 */
	u32 min_align;
	/* Constant offset accumulated on a pointer after a variable with
	 * known alignment was added to it (checked as reg_off += aux_off).
	 */
	u32 aux_off;
	/* Alignment of the variable part added to the pointer; a memory
	 * access of 'size' is rejected if (aux_off_align % size) != 0.
	 */
	u32 aux_off_align;
};
/* Classification of each byte of the BPF program's stack. */
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};
#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
/* state of the program:
 * type of all registers and stack info
 */
struct bpf_verifier_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* one entry per stack byte; values are presumably
	 * enum bpf_stack_slot_type — confirm against the verifier core.
	 */
	u8 stack_slot_type[MAX_BPF_STACK];
	/* spilled register state, one slot per BPF_REG_SIZE bytes of stack */
	struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
};
/* Auxiliary per-instruction state recorded during verification. */
struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
	};
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
struct bpf_verifier_env;

/* Hooks supplied by an external analyzer (see bpf_analyzer()). */
struct bpf_ext_analyzer_ops {
	/* NOTE(review): presumably invoked once per instruction as the
	 * verifier walks the program; a non-zero return likely aborts
	 * verification — confirm against the verifier core.
	 */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
};
/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	struct bpf_verifier_state cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
	void *analyzer_priv; /* pointer to external analyzer's private data */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	/* NOTE(review): the three flags below are not explained here;
	 * semantics must be confirmed against the verifier core:
	 * allow_ptr_leaks — presumably relaxes pointer-leak checks for
	 * privileged callers; seen_direct_write — presumably set when the
	 * program writes packet data directly; varlen_map_value_access —
	 * presumably permits variable-offset map value accesses.
	 */
	bool allow_ptr_leaks;
	bool seen_direct_write;
	bool varlen_map_value_access;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
};
/* Run the verifier over @prog on behalf of an external analyzer, invoking
 * @ops callbacks with @priv passed back via analyzer_priv.
 */
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
		 void *priv);

#endif /* _LINUX_BPF_VERIFIER_H */