mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-25 23:15:26 +07:00
6ba48ff46f
The current x86 instruction decoder steps along through the instruction stream but always ensures that it never steps farther than the largest possible instruction size (MAX_INSN_SIZE). The MPX code is now going to be doing some decoding of userspace instructions. We copy those from userspace in to the kernel and they're obviously completely untrusted coming from userspace. In addition to the constraint that instructions can only be so long, we also have to be aware of how long the buffer is that came in from userspace. This _looks_ to be similar to what the perf and kprobes is doing, but it's unclear to me whether they are affected. The whole reason we need this is that it is perfectly valid to be executing an instruction within MAX_INSN_SIZE bytes of an unreadable page. We should be able to gracefully handle short reads in those cases. This adds support to the decoder to record how long the buffer being decoded is and to refuse to "validate" the instruction if we would have gone over the end of the buffer to decode it. The kprobes code probably needs to be looked at here a bit more carefully. This patch still respects the MAX_INSN_SIZE limit there but the kprobes code does look like it might be able to be a bit more strict than it currently is. Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Acked-by: Jim Keniston <jkenisto@us.ibm.com> Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Cc: x86@kernel.org Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Paul Mackerras <paulus@samba.org> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: "David S. Miller" <davem@davemloft.net> Link: http://lkml.kernel.org/r/20141114153957.E6B01535@viggo.jf.intel.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
202 lines
6.0 KiB
C
202 lines
6.0 KiB
C
#ifndef _ASM_X86_INSN_H
#define _ASM_X86_INSN_H
/*
 * x86 instruction analysis
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2009
 */

/* insn_attr_t is defined in inat.h */
#include <asm/inat.h>
struct insn_field {
|
|
union {
|
|
insn_value_t value;
|
|
insn_byte_t bytes[4];
|
|
};
|
|
/* !0 if we've run insn_get_xxx() for this field */
|
|
unsigned char got;
|
|
unsigned char nbytes;
|
|
};
|
|
|
|
struct insn {
|
|
struct insn_field prefixes; /*
|
|
* Prefixes
|
|
* prefixes.bytes[3]: last prefix
|
|
*/
|
|
struct insn_field rex_prefix; /* REX prefix */
|
|
struct insn_field vex_prefix; /* VEX prefix */
|
|
struct insn_field opcode; /*
|
|
* opcode.bytes[0]: opcode1
|
|
* opcode.bytes[1]: opcode2
|
|
* opcode.bytes[2]: opcode3
|
|
*/
|
|
struct insn_field modrm;
|
|
struct insn_field sib;
|
|
struct insn_field displacement;
|
|
union {
|
|
struct insn_field immediate;
|
|
struct insn_field moffset1; /* for 64bit MOV */
|
|
struct insn_field immediate1; /* for 64bit imm or off16/32 */
|
|
};
|
|
union {
|
|
struct insn_field moffset2; /* for 64bit MOV */
|
|
struct insn_field immediate2; /* for 64bit imm or seg16 */
|
|
};
|
|
|
|
insn_attr_t attr;
|
|
unsigned char opnd_bytes;
|
|
unsigned char addr_bytes;
|
|
unsigned char length;
|
|
unsigned char x86_64;
|
|
|
|
const insn_byte_t *kaddr; /* kernel address of insn to analyze */
|
|
const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */
|
|
const insn_byte_t *next_byte;
|
|
};
|
|
|
|
/* Architectural upper bound on the length of one x86 instruction. */
#define MAX_INSN_SIZE	16

/* ModRM byte: mod[7:6] reg[5:3] rm[2:0] */
#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
#define X86_MODRM_RM(modrm) ((modrm) & 0x07)

/* SIB byte: scale[7:6] index[5:3] base[2:0] */
#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
#define X86_SIB_BASE(sib) ((sib) & 0x07)

/* REX prefix: 0100WRXB */
#define X86_REX_W(rex) ((rex) & 8)
#define X86_REX_R(rex) ((rex) & 4)
#define X86_REX_X(rex) ((rex) & 2)
#define X86_REX_B(rex) ((rex) & 1)

/* VEX bit flags  */
#define X86_VEX_W(vex)	((vex) & 0x80)	/* VEX3 Byte2 */
#define X86_VEX_R(vex)	((vex) & 0x80)	/* VEX2/3 Byte1 */
#define X86_VEX_X(vex)	((vex) & 0x40)	/* VEX3 Byte1 */
#define X86_VEX_B(vex)	((vex) & 0x20)	/* VEX3 Byte1 */
#define X86_VEX_L(vex)	((vex) & 0x04)	/* VEX3 Byte2, VEX2 Byte1 */
/* VEX bit fields */
#define X86_VEX3_M(vex)	((vex) & 0x1f)		/* VEX3 Byte1 */
#define X86_VEX2_M	1			/* VEX2.M always 1 */
#define X86_VEX_V(vex)	(((vex) & 0x78) >> 3)	/* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_P(vex)	((vex) & 0x03)		/* VEX3 Byte2, VEX2 Byte1 */
#define X86_VEX_M_MAX	0x1f			/* VEX3.M Maximum value */
|
|
|
|
/*
 * Decoder entry points (implemented in lib/insn.c).  insn_init() records
 * the buffer bounds; each insn_get_xxx() parses up to and including the
 * named field, caching the result in the struct insn.
 */
extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
extern void insn_get_prefixes(struct insn *insn);
extern void insn_get_opcode(struct insn *insn);
extern void insn_get_modrm(struct insn *insn);
extern void insn_get_sib(struct insn *insn);
extern void insn_get_displacement(struct insn *insn);
extern void insn_get_immediate(struct insn *insn);
extern void insn_get_length(struct insn *insn);
|
|
|
|
/* Attribute will be determined after getting ModRM (for opcode groups) */
static inline void insn_get_attribute(struct insn *insn)
{
	/* Parsing up through ModRM also resolves group-opcode attributes. */
	insn_get_modrm(insn);
}

/* Instruction uses RIP-relative addressing */
extern int insn_rip_relative(struct insn *insn);
|
|
|
|
/*
 * Initialize @insn for decoding kernel text at @kaddr.  The bitness of the
 * decode is taken from the kernel's own build configuration; @buf_len caps
 * how far past @kaddr the decoder may read.
 */
static inline void kernel_insn_init(struct insn *insn,
				    const void *kaddr, int buf_len)
{
#ifdef CONFIG_X86_64
	insn_init(insn, kaddr, buf_len, 1);
#else /* CONFIG_X86_32 */
	insn_init(insn, kaddr, buf_len, 0);
#endif
}
|
|
|
|
static inline int insn_is_avx(struct insn *insn)
|
|
{
|
|
if (!insn->prefixes.got)
|
|
insn_get_prefixes(insn);
|
|
return (insn->vex_prefix.value != 0);
|
|
}
|
|
|
|
/* Ensure this instruction is decoded completely */
|
|
static inline int insn_complete(struct insn *insn)
|
|
{
|
|
return insn->opcode.got && insn->modrm.got && insn->sib.got &&
|
|
insn->displacement.got && insn->immediate.got;
|
|
}
|
|
|
|
static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
|
|
{
|
|
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
|
|
return X86_VEX2_M;
|
|
else
|
|
return X86_VEX3_M(insn->vex_prefix.bytes[1]);
|
|
}
|
|
|
|
static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
|
|
{
|
|
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
|
|
return X86_VEX_P(insn->vex_prefix.bytes[1]);
|
|
else
|
|
return X86_VEX_P(insn->vex_prefix.bytes[2]);
|
|
}
|
|
|
|
/* Get the last prefix id from last prefix or VEX prefix */
|
|
static inline int insn_last_prefix_id(struct insn *insn)
|
|
{
|
|
if (insn_is_avx(insn))
|
|
return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
|
|
|
|
if (insn->prefixes.bytes[3])
|
|
return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Offset of each field from kaddr */
|
|
static inline int insn_offset_rex_prefix(struct insn *insn)
|
|
{
|
|
return insn->prefixes.nbytes;
|
|
}
|
|
static inline int insn_offset_vex_prefix(struct insn *insn)
|
|
{
|
|
return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
|
|
}
|
|
static inline int insn_offset_opcode(struct insn *insn)
|
|
{
|
|
return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
|
|
}
|
|
static inline int insn_offset_modrm(struct insn *insn)
|
|
{
|
|
return insn_offset_opcode(insn) + insn->opcode.nbytes;
|
|
}
|
|
static inline int insn_offset_sib(struct insn *insn)
|
|
{
|
|
return insn_offset_modrm(insn) + insn->modrm.nbytes;
|
|
}
|
|
static inline int insn_offset_displacement(struct insn *insn)
|
|
{
|
|
return insn_offset_sib(insn) + insn->sib.nbytes;
|
|
}
|
|
static inline int insn_offset_immediate(struct insn *insn)
|
|
{
|
|
return insn_offset_displacement(insn) + insn->displacement.nbytes;
|
|
}
|
|
|
|
#endif /* _ASM_X86_INSN_H */
|