/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef	__ASM_INSN_H
#define	__ASM_INSN_H

#include <linux/types.h>

/* A64 instructions are always 32 bits. */
#define	AARCH64_INSN_SIZE		4

#ifndef __ASSEMBLY__

/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section C3.1 "A64 instruction index by encoding":
 * AArch64 main encoding table
 *  Bit position
 *   28 27 26 25	Encoding Group
 *   0  0  -  -		Unallocated
 *   1  0  0  -		Data processing, immediate
 *   1  0  1  -		Branch, exception generation and system instructions
 *   -  1  -  0		Loads and stores
 *   -  1  0  1		Data processing - register
 *   0  1  1  1		Data processing - SIMD and floating point
 *   1  1  1  1		Data processing - SIMD and floating point
 * "-" means "don't care"
 */
enum aarch64_insn_encoding_class {
	AARCH64_INSN_CLS_UNKNOWN,	/* UNALLOCATED */
	AARCH64_INSN_CLS_DP_IMM,	/* Data processing - immediate */
	AARCH64_INSN_CLS_DP_REG,	/* Data processing - register */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* Data processing - SIMD and FP */
	AARCH64_INSN_CLS_LDST,		/* Loads and stores */
	AARCH64_INSN_CLS_BR_SYS,	/* Branch, exception generation and
					 * system instructions */
};
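
/*
 * Worked example (illustrative): the NOP encoding 0xd503201f has
 * bits[28:25] = 0b1010, which matches the "1 0 1 -" row in the table
 * above, i.e. the branch/exception/system group, so it is classified
 * as AARCH64_INSN_CLS_BR_SYS.
 */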

enum aarch64_insn_hint_op {
	AARCH64_INSN_HINT_NOP	= 0x0 << 5,
	AARCH64_INSN_HINT_YIELD	= 0x1 << 5,
	AARCH64_INSN_HINT_WFE	= 0x2 << 5,
	AARCH64_INSN_HINT_WFI	= 0x3 << 5,
	AARCH64_INSN_HINT_SEV	= 0x4 << 5,
	AARCH64_INSN_HINT_SEVL	= 0x5 << 5,
};
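
/*
 * These values are the HINT instruction's immediate (CRm:op2) already
 * shifted into place at bit 5, so they can be passed straight to
 * aarch64_insn_gen_hint(); NOP, for instance, is HINT #0.
 */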

enum aarch64_insn_imm_type {
	AARCH64_INSN_IMM_ADR,
	AARCH64_INSN_IMM_26,
	AARCH64_INSN_IMM_19,
	AARCH64_INSN_IMM_16,
	AARCH64_INSN_IMM_14,
	AARCH64_INSN_IMM_12,
	AARCH64_INSN_IMM_9,
	AARCH64_INSN_IMM_7,
	AARCH64_INSN_IMM_6,
	AARCH64_INSN_IMM_S,
	AARCH64_INSN_IMM_R,
	AARCH64_INSN_IMM_MAX
};
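
/*
 * Rough guide (not exhaustive): IMM_26 is the B/BL branch offset field
 * (bits [25:0]), IMM_19 the CBZ/CBNZ, B.cond and load-literal offset
 * (bits [23:5]), IMM_16 the MOVZ/MOVK/MOVN immediate (bits [20:5]),
 * IMM_14 the TBZ/TBNZ offset, IMM_12 the ADD/SUB and unsigned load/store
 * immediate (bits [21:10]) and IMM_7 the LDP/STP offset.
 * aarch64_insn_encode_immediate() below places a raw field value into
 * the right bit positions for the given type.
 */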

enum aarch64_insn_register_type {
	AARCH64_INSN_REGTYPE_RT,
	AARCH64_INSN_REGTYPE_RN,
	AARCH64_INSN_REGTYPE_RT2,
	AARCH64_INSN_REGTYPE_RM,
	AARCH64_INSN_REGTYPE_RD,
	AARCH64_INSN_REGTYPE_RA,
	AARCH64_INSN_REGTYPE_RS,
};
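
/*
 * These name the register field of an encoding rather than a fixed bit
 * range; as a rough guide, Rd and Rt live in bits [4:0], Rn in [9:5],
 * Rt2 and Ra in [14:10], and Rm and Rs in [20:16].
 */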

enum aarch64_insn_register {
	AARCH64_INSN_REG_0  = 0,
	AARCH64_INSN_REG_1  = 1,
	AARCH64_INSN_REG_2  = 2,
	AARCH64_INSN_REG_3  = 3,
	AARCH64_INSN_REG_4  = 4,
	AARCH64_INSN_REG_5  = 5,
	AARCH64_INSN_REG_6  = 6,
	AARCH64_INSN_REG_7  = 7,
	AARCH64_INSN_REG_8  = 8,
	AARCH64_INSN_REG_9  = 9,
	AARCH64_INSN_REG_10 = 10,
	AARCH64_INSN_REG_11 = 11,
	AARCH64_INSN_REG_12 = 12,
	AARCH64_INSN_REG_13 = 13,
	AARCH64_INSN_REG_14 = 14,
	AARCH64_INSN_REG_15 = 15,
	AARCH64_INSN_REG_16 = 16,
	AARCH64_INSN_REG_17 = 17,
	AARCH64_INSN_REG_18 = 18,
	AARCH64_INSN_REG_19 = 19,
	AARCH64_INSN_REG_20 = 20,
	AARCH64_INSN_REG_21 = 21,
	AARCH64_INSN_REG_22 = 22,
	AARCH64_INSN_REG_23 = 23,
	AARCH64_INSN_REG_24 = 24,
	AARCH64_INSN_REG_25 = 25,
	AARCH64_INSN_REG_26 = 26,
	AARCH64_INSN_REG_27 = 27,
	AARCH64_INSN_REG_28 = 28,
	AARCH64_INSN_REG_29 = 29,
	AARCH64_INSN_REG_FP = 29, /* Frame pointer */
	AARCH64_INSN_REG_30 = 30,
	AARCH64_INSN_REG_LR = 30, /* Link register */
	AARCH64_INSN_REG_ZR = 31, /* Zero: as source register */
	AARCH64_INSN_REG_SP = 31  /* Stack pointer: as load/store base reg */
};

enum aarch64_insn_special_register {
	AARCH64_INSN_SPCLREG_SPSR_EL1	= 0xC200,
	AARCH64_INSN_SPCLREG_ELR_EL1	= 0xC201,
	AARCH64_INSN_SPCLREG_SP_EL0	= 0xC208,
	AARCH64_INSN_SPCLREG_SPSEL	= 0xC210,
	AARCH64_INSN_SPCLREG_CURRENTEL	= 0xC212,
	AARCH64_INSN_SPCLREG_DAIF	= 0xDA11,
	AARCH64_INSN_SPCLREG_NZCV	= 0xDA10,
	AARCH64_INSN_SPCLREG_FPCR	= 0xDA20,
	AARCH64_INSN_SPCLREG_DSPSR_EL0	= 0xDA28,
	AARCH64_INSN_SPCLREG_DLR_EL0	= 0xDA29,
	AARCH64_INSN_SPCLREG_SPSR_EL2	= 0xE200,
	AARCH64_INSN_SPCLREG_ELR_EL2	= 0xE201,
	AARCH64_INSN_SPCLREG_SP_EL1	= 0xE208,
	AARCH64_INSN_SPCLREG_SPSR_INQ	= 0xE218,
	AARCH64_INSN_SPCLREG_SPSR_ABT	= 0xE219,
	AARCH64_INSN_SPCLREG_SPSR_UND	= 0xE21A,
	AARCH64_INSN_SPCLREG_SPSR_FIQ	= 0xE21B,
	AARCH64_INSN_SPCLREG_SPSR_EL3	= 0xF200,
	AARCH64_INSN_SPCLREG_ELR_EL3	= 0xF201,
	AARCH64_INSN_SPCLREG_SP_EL2	= 0xF210
};

enum aarch64_insn_variant {
	AARCH64_INSN_VARIANT_32BIT,
	AARCH64_INSN_VARIANT_64BIT
};

enum aarch64_insn_condition {
	AARCH64_INSN_COND_EQ = 0x0, /* == */
	AARCH64_INSN_COND_NE = 0x1, /* != */
	AARCH64_INSN_COND_CS = 0x2, /* unsigned >= */
	AARCH64_INSN_COND_CC = 0x3, /* unsigned < */
	AARCH64_INSN_COND_MI = 0x4, /* < 0 */
	AARCH64_INSN_COND_PL = 0x5, /* >= 0 */
	AARCH64_INSN_COND_VS = 0x6, /* overflow */
	AARCH64_INSN_COND_VC = 0x7, /* no overflow */
	AARCH64_INSN_COND_HI = 0x8, /* unsigned > */
	AARCH64_INSN_COND_LS = 0x9, /* unsigned <= */
	AARCH64_INSN_COND_GE = 0xa, /* signed >= */
	AARCH64_INSN_COND_LT = 0xb, /* signed < */
	AARCH64_INSN_COND_GT = 0xc, /* signed > */
	AARCH64_INSN_COND_LE = 0xd, /* signed <= */
	AARCH64_INSN_COND_AL = 0xe, /* always */
};
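
/*
 * These are the standard A64 condition codes as used in the cond field
 * of B.cond (see aarch64_insn_gen_cond_branch_imm() below); flipping the
 * low bit of any code other than AL yields its logical inverse, e.g.
 * AARCH64_INSN_COND_EQ ^ 1 == AARCH64_INSN_COND_NE.
 */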

enum aarch64_insn_branch_type {
	AARCH64_INSN_BRANCH_NOLINK,
	AARCH64_INSN_BRANCH_LINK,
	AARCH64_INSN_BRANCH_RETURN,
	AARCH64_INSN_BRANCH_COMP_ZERO,
	AARCH64_INSN_BRANCH_COMP_NONZERO,
};

enum aarch64_insn_size_type {
	AARCH64_INSN_SIZE_8,
	AARCH64_INSN_SIZE_16,
	AARCH64_INSN_SIZE_32,
	AARCH64_INSN_SIZE_64,
};

enum aarch64_insn_ldst_type {
	AARCH64_INSN_LDST_LOAD_REG_OFFSET,
	AARCH64_INSN_LDST_STORE_REG_OFFSET,
	AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX,
	AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
	AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
	AARCH64_INSN_LDST_LOAD_EX,
	AARCH64_INSN_LDST_STORE_EX,
};

enum aarch64_insn_adsb_type {
	AARCH64_INSN_ADSB_ADD,
	AARCH64_INSN_ADSB_SUB,
	AARCH64_INSN_ADSB_ADD_SETFLAGS,
	AARCH64_INSN_ADSB_SUB_SETFLAGS
};

enum aarch64_insn_movewide_type {
	AARCH64_INSN_MOVEWIDE_ZERO,
	AARCH64_INSN_MOVEWIDE_KEEP,
	AARCH64_INSN_MOVEWIDE_INVERSE
};

enum aarch64_insn_bitfield_type {
	AARCH64_INSN_BITFIELD_MOVE,
	AARCH64_INSN_BITFIELD_MOVE_UNSIGNED,
	AARCH64_INSN_BITFIELD_MOVE_SIGNED
};

enum aarch64_insn_data1_type {
	AARCH64_INSN_DATA1_REVERSE_16,
	AARCH64_INSN_DATA1_REVERSE_32,
	AARCH64_INSN_DATA1_REVERSE_64,
};

enum aarch64_insn_data2_type {
	AARCH64_INSN_DATA2_UDIV,
	AARCH64_INSN_DATA2_SDIV,
	AARCH64_INSN_DATA2_LSLV,
	AARCH64_INSN_DATA2_LSRV,
	AARCH64_INSN_DATA2_ASRV,
	AARCH64_INSN_DATA2_RORV,
};

enum aarch64_insn_data3_type {
	AARCH64_INSN_DATA3_MADD,
	AARCH64_INSN_DATA3_MSUB,
};

enum aarch64_insn_logic_type {
	AARCH64_INSN_LOGIC_AND,
	AARCH64_INSN_LOGIC_BIC,
	AARCH64_INSN_LOGIC_ORR,
	AARCH64_INSN_LOGIC_ORN,
	AARCH64_INSN_LOGIC_EOR,
	AARCH64_INSN_LOGIC_EON,
	AARCH64_INSN_LOGIC_AND_SETFLAGS,
	AARCH64_INSN_LOGIC_BIC_SETFLAGS
};

enum aarch64_insn_prfm_type {
	AARCH64_INSN_PRFM_TYPE_PLD,
	AARCH64_INSN_PRFM_TYPE_PLI,
	AARCH64_INSN_PRFM_TYPE_PST,
};

enum aarch64_insn_prfm_target {
	AARCH64_INSN_PRFM_TARGET_L1,
	AARCH64_INSN_PRFM_TARGET_L2,
	AARCH64_INSN_PRFM_TARGET_L3,
};

enum aarch64_insn_prfm_policy {
	AARCH64_INSN_PRFM_POLICY_KEEP,
	AARCH64_INSN_PRFM_POLICY_STRM,
};
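
/*
 * A prefetch operation is built from one value of each of the three
 * enums above; for example PLD + L1 + KEEP corresponds to the
 * "pldl1keep" hint and PST + L1 + STRM to "pstl1strm"
 * (see aarch64_insn_gen_prefetch() below).
 */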

#define	__AARCH64_INSN_FUNCS(abbr, mask, val)	\
static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
{ return (code & (mask)) == (val); } \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }
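
/*
 * For example, __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) below
 * defines aarch64_insn_is_b(code), which is true when
 * (code & 0xFC000000) == 0x14000000, and aarch64_insn_get_b_value(),
 * which returns the opcode value 0x14000000.
 */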

__AARCH64_INSN_FUNCS(adr,	0x9F000000, 0x10000000)
__AARCH64_INSN_FUNCS(adrp,	0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm,	0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit,	0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit,	0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive,	0x3F800000, 0x08000000)
__AARCH64_INSN_FUNCS(load_ex,	0x3F400000, 0x08400000)
__AARCH64_INSN_FUNCS(store_ex,	0x3F400000, 0x08000000)
__AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post,	0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre,	0x7FC00000, 0x29800000)
__AARCH64_INSN_FUNCS(ldp_pre,	0x7FC00000, 0x29C00000)
__AARCH64_INSN_FUNCS(add_imm,	0x7F000000, 0x11000000)
__AARCH64_INSN_FUNCS(adds_imm,	0x7F000000, 0x31000000)
__AARCH64_INSN_FUNCS(sub_imm,	0x7F000000, 0x51000000)
__AARCH64_INSN_FUNCS(subs_imm,	0x7F000000, 0x71000000)
__AARCH64_INSN_FUNCS(movn,	0x7F800000, 0x12800000)
__AARCH64_INSN_FUNCS(sbfm,	0x7F800000, 0x13000000)
__AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
__AARCH64_INSN_FUNCS(movz,	0x7F800000, 0x52800000)
__AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
__AARCH64_INSN_FUNCS(movk,	0x7F800000, 0x72800000)
__AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
__AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
__AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
__AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
__AARCH64_INSN_FUNCS(madd,	0x7FE08000, 0x1B000000)
__AARCH64_INSN_FUNCS(msub,	0x7FE08000, 0x1B008000)
__AARCH64_INSN_FUNCS(udiv,	0x7FE0FC00, 0x1AC00800)
__AARCH64_INSN_FUNCS(sdiv,	0x7FE0FC00, 0x1AC00C00)
__AARCH64_INSN_FUNCS(lslv,	0x7FE0FC00, 0x1AC02000)
__AARCH64_INSN_FUNCS(lsrv,	0x7FE0FC00, 0x1AC02400)
__AARCH64_INSN_FUNCS(asrv,	0x7FE0FC00, 0x1AC02800)
__AARCH64_INSN_FUNCS(rorv,	0x7FE0FC00, 0x1AC02C00)
__AARCH64_INSN_FUNCS(rev16,	0x7FFFFC00, 0x5AC00400)
__AARCH64_INSN_FUNCS(rev32,	0x7FFFFC00, 0x5AC00800)
__AARCH64_INSN_FUNCS(rev64,	0x7FFFFC00, 0x5AC00C00)
__AARCH64_INSN_FUNCS(and,	0x7F200000, 0x0A000000)
__AARCH64_INSN_FUNCS(bic,	0x7F200000, 0x0A200000)
__AARCH64_INSN_FUNCS(orr,	0x7F200000, 0x2A000000)
__AARCH64_INSN_FUNCS(orn,	0x7F200000, 0x2A200000)
__AARCH64_INSN_FUNCS(eor,	0x7F200000, 0x4A000000)
__AARCH64_INSN_FUNCS(eon,	0x7F200000, 0x4A200000)
__AARCH64_INSN_FUNCS(ands,	0x7F200000, 0x6A000000)
__AARCH64_INSN_FUNCS(bics,	0x7F200000, 0x6A200000)
__AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
__AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
__AARCH64_INSN_FUNCS(cbz,	0x7F000000, 0x34000000)
__AARCH64_INSN_FUNCS(cbnz,	0x7F000000, 0x35000000)
__AARCH64_INSN_FUNCS(tbz,	0x7F000000, 0x36000000)
__AARCH64_INSN_FUNCS(tbnz,	0x7F000000, 0x37000000)
__AARCH64_INSN_FUNCS(bcond,	0xFF000010, 0x54000000)
__AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk,	0xFFE0001F, 0xD4200000)
__AARCH64_INSN_FUNCS(exception,	0xFF000000, 0xD4000000)
__AARCH64_INSN_FUNCS(hint,	0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br,	0xFFFFFC1F, 0xD61F0000)
__AARCH64_INSN_FUNCS(blr,	0xFFFFFC1F, 0xD63F0000)
__AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
__AARCH64_INSN_FUNCS(eret,	0xFFFFFFFF, 0xD69F03E0)
__AARCH64_INSN_FUNCS(mrs,	0xFFF00000, 0xD5300000)
__AARCH64_INSN_FUNCS(msr_imm,	0xFFF8F01F, 0xD500401F)
__AARCH64_INSN_FUNCS(msr_reg,	0xFFF00000, 0xD5100000)

#undef	__AARCH64_INSN_FUNCS

bool aarch64_insn_is_nop(u32 insn);
bool aarch64_insn_is_branch_imm(u32 insn);

static inline bool aarch64_insn_is_adr_adrp(u32 insn)
{
	return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn);
}

int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
bool aarch64_insn_uses_literal(u32 insn);
bool aarch64_insn_is_branch(u32 insn);
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm);
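
/*
 * Illustrative use (not code from this file): to retarget an existing
 * B/BL instruction, shift the new byte offset right by 2 and splice it
 * into the imm26 field:
 *
 *	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
 *					     (s64)(target - pc) >> 2);
 */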

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn);
u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
				enum aarch64_insn_branch_type type);
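
/*
 * A minimal sketch (illustrative, assuming a patching context): generate
 * a relative branch-with-link from "pc" to "target" and write it in:
 *
 *	u32 insn = aarch64_insn_gen_branch_imm((unsigned long)pc,
 *					       (unsigned long)target,
 *					       AARCH64_INSN_BRANCH_LINK);
 *	aarch64_insn_patch_text_nosync(pc, insn);
 */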

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond);
u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
u32 aarch64_insn_gen_nop(void);
u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type);
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type);
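
/*
 * A minimal sketch (illustrative, mirroring what a JIT prologue might do):
 * save the frame pointer and link register with a pre-indexed store pair,
 * i.e. the equivalent of "stp x29, x30, [sp, #-16]!":
 *
 *	u32 insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_FP,
 *				AARCH64_INSN_REG_LR,
 *				AARCH64_INSN_REG_SP, -16,
 *				AARCH64_INSN_VARIANT_64BIT,
 *				AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 */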

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type);
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type);
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type);
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type);
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type);
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type);
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type);
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type);
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy);
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);

bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);

int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);

s32 aarch64_insn_adrp_get_offset(u32 insn);
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset);

bool aarch32_insn_is_wide(u32 insn);

#define A32_RN_OFFSET	16
#define A32_RT_OFFSET	12
#define A32_RT2_OFFSET	0

u32 aarch64_insn_extract_system_reg(u32 insn);
u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
u32 aarch32_insn_mcr_extract_opc2(u32 insn);
u32 aarch32_insn_mcr_extract_crm(u32 insn);

typedef bool (pstate_check_t)(unsigned long);
extern pstate_check_t * const aarch32_opcode_cond_checks[16];

#endif /* __ASSEMBLY__ */

#endif	/* __ASM_INSN_H */