Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-09 07:56:39 +07:00)
KVM: x86 emulator: Use ctxt->_eip directly in do_insn_fetch_byte()
Instead of passing ctxt->_eip from insn_fetch() call sites, read it from ctxt inside do_insn_fetch_byte(). This is done by replacing insn_fetch()'s _eip argument with _ctxt, which is clearer than letting the macro use ctxt silently in its body. Although this moves the increment of ctxt->_eip from insn_fetch() into do_insn_fetch_byte(), it has no functional effect.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 743eeb0b01
commit 807941b121
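The diff below is purely a calling-convention change: instead of threading an eip value through insn_fetch() and its helpers, the emulation context itself owns the fetch cursor (ctxt->_eip) and do_insn_fetch_byte() advances it. The following standalone sketch models that convention with hypothetical names (toy_ctxt, toy_fetch_byte, toy_insn_fetch); it is not the kernel code and drops the rc/goto-done error plumbing of the real macro:

/*
 * Minimal sketch (builds with gcc/clang; the statement-expression macro is a
 * GNU extension, as in the kernel). All names here are made up for
 * illustration and do not exist in emulate.c. Assumes a little-endian host,
 * as the real emulator does (it only runs on x86).
 */
#include <stdio.h>

#define EMUL_CONTINUE      0
#define EMUL_UNHANDLEABLE  1

struct toy_ctxt {
        const unsigned char *insn; /* backing instruction bytes */
        unsigned long eip;         /* address where the instruction starts */
        unsigned long _eip;        /* current fetch cursor, owned by the context */
};

/* New-style byte fetch: reads the byte at _eip and advances _eip itself. */
static int toy_fetch_byte(struct toy_ctxt *ctxt, unsigned char *dest)
{
        *dest = ctxt->insn[ctxt->_eip];
        ctxt->_eip++;
        return EMUL_CONTINUE;
}

static int toy_fetch(struct toy_ctxt *ctxt, void *dest, unsigned size)
{
        unsigned char *d = dest;
        int rc;

        /* x86 instructions are limited to 15 bytes. */
        if (ctxt->_eip + size - ctxt->eip > 15)
                return EMUL_UNHANDLEABLE;
        while (size--) {
                rc = toy_fetch_byte(ctxt, d++);
                if (rc != EMUL_CONTINUE)
                        return rc;
        }
        return EMUL_CONTINUE;
}

/* Callers now pass the context, not an eip value; no "+= size" is needed. */
#define toy_insn_fetch(_type, _size, _ctxt, _rc)         \
({      unsigned long _x = 0;                            \
        (_rc) = toy_fetch(_ctxt, &_x, (_size));          \
        (_type)_x;                                       \
})

int main(void)
{
        const unsigned char bytes[] = { 0x0f, 0xb6, 0xc0 }; /* movzx eax, al */
        struct toy_ctxt ctxt = { .insn = bytes, .eip = 0, ._eip = 0 };
        int rc;

        unsigned char b = toy_insn_fetch(unsigned char, 1, &ctxt, rc);
        printf("first byte 0x%02x, cursor advanced to %lu (rc=%d)\n",
               b, ctxt._eip, rc);
        return 0;
}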
@@ -651,18 +651,26 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
         return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
-static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
-                              unsigned long eip, u8 *dest)
+/*
+ * Fetch the next byte of the instruction being emulated which is pointed to
+ * by ctxt->_eip, then increment ctxt->_eip.
+ *
+ * Also prefetch the remaining bytes of the instruction without crossing page
+ * boundary if they are not in fetch_cache yet.
+ */
+static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
 {
         struct fetch_cache *fc = &ctxt->fetch;
         int rc;
         int size, cur_size;
 
-        if (eip == fc->end) {
+        if (ctxt->_eip == fc->end) {
                 unsigned long linear;
-                struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
+                struct segmented_address addr = { .seg = VCPU_SREG_CS,
+                                                  .ea = ctxt->_eip };
                 cur_size = fc->end - fc->start;
-                size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
+                size = min(15UL - cur_size,
+                           PAGE_SIZE - offset_in_page(ctxt->_eip));
                 rc = __linearize(ctxt, addr, size, false, true, &linear);
                 if (rc != X86EMUL_CONTINUE)
                         return rc;
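A quick worked example of the prefetch bound above, size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(ctxt->_eip)): with 4 KiB pages and a (made-up) fetch position three bytes short of a page boundary, the prefetch is capped by the page, not by the 15-byte instruction limit. Sketch only, not kernel code:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((p) & (PAGE_SIZE - 1))
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned long _eip = 0x7ffd;  /* hypothetical: 3 bytes before the next page */
        unsigned long cur_size = 0;   /* nothing in the fetch cache yet */
        unsigned long size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(_eip));

        printf("prefetch size = %lu\n", size); /* prints 3 */
        return 0;
}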
@@ -672,20 +680,21 @@ static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
                         return rc;
                 fc->end += size;
         }
-        *dest = fc->data[eip - fc->start];
+        *dest = fc->data[ctxt->_eip - fc->start];
+        ctxt->_eip++;
         return X86EMUL_CONTINUE;
 }
 
 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
-                         unsigned long eip, void *dest, unsigned size)
+                         void *dest, unsigned size)
 {
         int rc;
 
         /* x86 instructions are limited to 15 bytes. */
-        if (eip + size - ctxt->eip > 15)
+        if (ctxt->_eip + size - ctxt->eip > 15)
                 return X86EMUL_UNHANDLEABLE;
         while (size--) {
-                rc = do_insn_fetch_byte(ctxt, eip++, dest++);
+                rc = do_insn_fetch_byte(ctxt, dest++);
                 if (rc != X86EMUL_CONTINUE)
                         return rc;
         }
@@ -693,20 +702,18 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
 }
 
 /* Fetch next part of the instruction being emulated. */
-#define insn_fetch(_type, _size, _eip)                          \
+#define insn_fetch(_type, _size, _ctxt)                         \
 ({      unsigned long _x;                                       \
-        rc = do_insn_fetch(ctxt, (_eip), &_x, (_size));         \
+        rc = do_insn_fetch(_ctxt, &_x, (_size));                \
         if (rc != X86EMUL_CONTINUE)                             \
                 goto done;                                      \
-        (_eip) += (_size);                                      \
         (_type)_x;                                              \
 })
 
-#define insn_fetch_arr(_arr, _size, _eip)                       \
+#define insn_fetch_arr(_arr, _size, _ctxt)                      \
-({      rc = do_insn_fetch(ctxt, (_eip), _arr, (_size));        \
+({      rc = do_insn_fetch(_ctxt, _arr, (_size));               \
         if (rc != X86EMUL_CONTINUE)                             \
                 goto done;                                      \
-        (_eip) += (_size);                                      \
 })
 
 /*
@@ -894,7 +901,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
         }
 
-        ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+        ctxt->modrm = insn_fetch(u8, 1, ctxt);
         ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
         ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
         ctxt->modrm_rm |= (ctxt->modrm & 0x07);
@@ -928,13 +935,13 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                 switch (ctxt->modrm_mod) {
                 case 0:
                         if (ctxt->modrm_rm == 6)
-                                modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
+                                modrm_ea += insn_fetch(u16, 2, ctxt);
                         break;
                 case 1:
-                        modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
+                        modrm_ea += insn_fetch(s8, 1, ctxt);
                         break;
                 case 2:
-                        modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
+                        modrm_ea += insn_fetch(u16, 2, ctxt);
                         break;
                 }
                 switch (ctxt->modrm_rm) {
@@ -971,13 +978,13 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
         } else {
                 /* 32/64-bit ModR/M decode. */
                 if ((ctxt->modrm_rm & 7) == 4) {
-                        sib = insn_fetch(u8, 1, ctxt->_eip);
+                        sib = insn_fetch(u8, 1, ctxt);
                         index_reg |= (sib >> 3) & 7;
                         base_reg |= sib & 7;
                         scale = sib >> 6;
 
                         if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
-                                modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
+                                modrm_ea += insn_fetch(s32, 4, ctxt);
                         else
                                 modrm_ea += ctxt->regs[base_reg];
                         if (index_reg != 4)
@@ -990,13 +997,13 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                 switch (ctxt->modrm_mod) {
                 case 0:
                         if (ctxt->modrm_rm == 5)
-                                modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
+                                modrm_ea += insn_fetch(s32, 4, ctxt);
                         break;
                 case 1:
-                        modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
+                        modrm_ea += insn_fetch(s8, 1, ctxt);
                         break;
                 case 2:
-                        modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
+                        modrm_ea += insn_fetch(s32, 4, ctxt);
                         break;
                 }
         }
@@ -1013,13 +1020,13 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
         op->type = OP_MEM;
         switch (ctxt->ad_bytes) {
         case 2:
-                op->addr.mem.ea = insn_fetch(u16, 2, ctxt->_eip);
+                op->addr.mem.ea = insn_fetch(u16, 2, ctxt);
                 break;
         case 4:
-                op->addr.mem.ea = insn_fetch(u32, 4, ctxt->_eip);
+                op->addr.mem.ea = insn_fetch(u32, 4, ctxt);
                 break;
         case 8:
-                op->addr.mem.ea = insn_fetch(u64, 8, ctxt->_eip);
+                op->addr.mem.ea = insn_fetch(u64, 8, ctxt);
                 break;
         }
 done:
@@ -3309,13 +3316,13 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
         /* NB. Immediates are sign-extended as necessary. */
         switch (op->bytes) {
         case 1:
-                op->val = insn_fetch(s8, 1, ctxt->_eip);
+                op->val = insn_fetch(s8, 1, ctxt);
                 break;
         case 2:
-                op->val = insn_fetch(s16, 2, ctxt->_eip);
+                op->val = insn_fetch(s16, 2, ctxt);
                 break;
         case 4:
-                op->val = insn_fetch(s32, 4, ctxt->_eip);
+                op->val = insn_fetch(s32, 4, ctxt);
                 break;
         }
         if (!sign_extension) {
@@ -3374,7 +3381,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 
         /* Legacy prefixes. */
         for (;;) {
-                switch (ctxt->b = insn_fetch(u8, 1, ctxt->_eip)) {
+                switch (ctxt->b = insn_fetch(u8, 1, ctxt)) {
                 case 0x66:      /* operand-size override */
                         op_prefix = true;
                         /* switch between 2/4 bytes */
@@ -3430,7 +3437,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
         /* Two-byte opcode? */
         if (ctxt->b == 0x0f) {
                 ctxt->twobyte = 1;
-                ctxt->b = insn_fetch(u8, 1, ctxt->_eip);
+                ctxt->b = insn_fetch(u8, 1, ctxt);
                 opcode = twobyte_table[ctxt->b];
         }
         ctxt->d = opcode.flags;
@@ -3438,13 +3445,13 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
         while (ctxt->d & GroupMask) {
                 switch (ctxt->d & GroupMask) {
                 case Group:
-                        ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+                        ctxt->modrm = insn_fetch(u8, 1, ctxt);
                         --ctxt->_eip;
                         goffset = (ctxt->modrm >> 3) & 7;
                         opcode = opcode.u.group[goffset];
                         break;
                 case GroupDual:
-                        ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+                        ctxt->modrm = insn_fetch(u8, 1, ctxt);
                         --ctxt->_eip;
                         goffset = (ctxt->modrm >> 3) & 7;
                         if ((ctxt->modrm >> 6) == 3)
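In the Group and GroupDual cases above, the ModRM byte is fetched only to select the opcode-group entry, and ctxt->_eip is then rewound so that the regular ModRM decoding re-reads the same byte from the fetch cache. Because the increment now happens inside do_insn_fetch_byte(), the existing --ctxt->_eip keeps that peek-and-rewind behaviour unchanged. A tiny self-contained illustration (hypothetical names, not the kernel code):

#include <stdio.h>

/* Simplified context: just a byte buffer and the fetch cursor. */
struct toy_ctxt {
        const unsigned char *insn;
        unsigned long _eip;
};

/* Fetching a byte advances the cursor, as do_insn_fetch_byte() now does. */
static unsigned char toy_fetch_byte(struct toy_ctxt *ctxt)
{
        return ctxt->insn[ctxt->_eip++];
}

int main(void)
{
        const unsigned char bytes[] = { 0xff, 0x30 }; /* FF /6 (push r/m), ModRM = 0x30 */
        struct toy_ctxt ctxt = { .insn = bytes, ._eip = 1 };

        unsigned char modrm = toy_fetch_byte(&ctxt); /* peek at ModRM to pick the group entry */
        --ctxt._eip;                                 /* rewind so ModRM decode fetches it again */

        printf("group index %u, cursor back at %lu\n",
               (unsigned)((modrm >> 3) & 7), ctxt._eip);
        return 0;
}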
@@ -3577,7 +3584,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
                 ctxt->src.type = OP_IMM;
                 ctxt->src.addr.mem.ea = ctxt->_eip;
                 ctxt->src.bytes = ctxt->op_bytes + 2;
-                insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt->_eip);
+                insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt);
                 break;
         case SrcMemFAddr:
                 memop.bytes = ctxt->op_bytes + 2;
@@ -3630,7 +3637,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
                 ctxt->dst.type = OP_IMM;
                 ctxt->dst.addr.mem.ea = ctxt->_eip;
                 ctxt->dst.bytes = 1;
-                ctxt->dst.val = insn_fetch(u8, 1, ctxt->_eip);
+                ctxt->dst.val = insn_fetch(u8, 1, ctxt);
                 break;
         case DstMem:
         case DstMem64: