KVM: PPC: Emulator: clean up instruction parsing
Instructions on PPC are encoded quite similarly. So instead of having every instruction's emulation code decode the instruction fields itself, we can move that decoding to a more generic place and rely on the compiler to optimize the unused extractions away.

This has two advantages: it makes the code smaller, and it makes it less error prone, because the instruction fields are always available, so accidental misuse is reduced.

Functionally, this patch doesn't change anything.

Signed-off-by: Alexander Graf <agraf@suse.de>
This commit is contained in:
parent 5b74716eba
commit c46dc9a861
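The whole diff below follows a single pattern: the per-case get_xx(inst) calls are hoisted into one block of initializers at the top of each emulation function, and the individual cases then use the pre-decoded values. The following is a minimal, self-contained sketch of that idea, not the kernel's code: the helper definitions are assumptions based on the standard PowerPC instruction layout (primary opcode in the top six bits, RT/RS in bits 6-10, RA in 11-15, RB in 16-20), and the example encoding is chosen purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical decode helpers mirroring the get_xx() names used in the
 * diff. Bit positions assume the standard big-endian PowerPC field
 * numbering; the kernel's real helpers live in its own headers. */
static inline unsigned int get_op(uint32_t inst) { return inst >> 26; }
static inline unsigned int get_rt(uint32_t inst) { return (inst >> 21) & 0x1f; }
static inline unsigned int get_ra(uint32_t inst) { return (inst >> 16) & 0x1f; }
static inline unsigned int get_rb(uint32_t inst) { return (inst >> 11) & 0x1f; }

int main(void)
{
	uint32_t inst = 0x7c6f4a14;	/* encodes "add r3, r15, r9" */

	/* The pattern of the patch: decode every field once, up front.
	 * Each extraction is a side-effect-free shift and mask, so the
	 * compiler simply drops any value a given case never reads. */
	unsigned int rt = get_rt(inst);
	unsigned int ra = get_ra(inst);
	unsigned int rb = get_rb(inst);

	printf("op=%u rt=%u ra=%u rb=%u\n", get_op(inst), rt, ra, rb);
	return 0;
}

That cost-free dead-code elimination is what lets the patch make the functions both smaller and safer: the fields are always in scope under consistent names, so no case can forget to decode one or decode the wrong one.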
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
-	int dcrn;
-	int ra;
-	int rb;
-	int rc;
-	int rs;
-	int rt;
-	int ws;
+	int dcrn = get_dcrn(inst);
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
+	int rc = get_rc(inst);
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
+	int ws = get_ws(inst);
 
 	switch (get_op(inst)) {
 	case 31:
 		switch (get_xop(inst)) {
 
 		case XOP_MFDCR:
-			dcrn = get_dcrn(inst);
-			rt = get_rt(inst);
-
 			/* The guest may access CPR0 registers to determine the timebase
 			 * frequency, and it must know the real host frequency because it
 			 * can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 
 		case XOP_MTDCR:
-			dcrn = get_dcrn(inst);
-			rs = get_rs(inst);
-
 			/* emulate some access in kernel */
 			switch (dcrn) {
 			case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 
 		case XOP_TLBWE:
-			ra = get_ra(inst);
-			rs = get_rs(inst);
-			ws = get_ws(inst);
 			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
 			break;
 
 		case XOP_TLBSX:
-			rt = get_rt(inst);
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-			rc = get_rc(inst);
 			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
 			break;
 
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
+	int rt = get_rt(inst);
+	int rs = get_rs(inst);
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
 
 	switch (get_op(inst)) {
 	case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case 31:
 		switch (get_xop(inst)) {
 		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, get_rt(inst),
-				       vcpu->arch.shared->msr);
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
 			break;
 		case OP_31_XOP_MTMSRD:
 		{
-			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
 			if (inst & 0x10000) {
-				vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
-				vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+				ulong new_msr = vcpu->arch.shared->msr;
+				new_msr &= ~(MSR_RI | MSR_EE);
+				new_msr |= rs_val & (MSR_RI | MSR_EE);
+				vcpu->arch.shared->msr = new_msr;
 			} else
-				kvmppc_set_msr(vcpu, rs);
+				kvmppc_set_msr(vcpu, rs_val);
 			break;
 		}
 		case OP_31_XOP_MTMSR:
-			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_MFSR:
 		{
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			if (vcpu->arch.mmu.mfsrin) {
 				u32 sr;
 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
 			}
 			break;
 		}
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		{
 			int srnum;
 
-			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
 			if (vcpu->arch.mmu.mfsrin) {
 				u32 sr;
 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
 			}
 			break;
 		}
 		case OP_31_XOP_MTSR:
 			vcpu->arch.mmu.mtsrin(vcpu,
 				(inst >> 16) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_MTSRIN:
 			vcpu->arch.mmu.mtsrin(vcpu,
-				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+				kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_TLBIE:
 		case OP_31_XOP_TLBIEL:
 		{
 			bool large = (inst & 0x00200000) ? true : false;
-			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+			ulong addr = kvmppc_get_gpr(vcpu, rb);
 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
 			break;
 		}
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				return EMULATE_FAIL;
 
 			vcpu->arch.mmu.slbmte(vcpu,
-					kvmppc_get_gpr(vcpu, get_rs(inst)),
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rs),
+					kvmppc_get_gpr(vcpu, rb));
 			break;
 		case OP_31_XOP_SLBIE:
 			if (!vcpu->arch.mmu.slbie)
 				return EMULATE_FAIL;
 
 			vcpu->arch.mmu.slbie(vcpu,
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rb));
 			break;
 		case OP_31_XOP_SLBIA:
 			if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			if (!vcpu->arch.mmu.slbmfee) {
 				emulated = EMULATE_FAIL;
 			} else {
-				ulong t, rb;
+				ulong t, rb_val;
 
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
 			}
 			break;
 		case OP_31_XOP_SLBMFEV:
 			if (!vcpu->arch.mmu.slbmfev) {
 				emulated = EMULATE_FAIL;
 			} else {
-				ulong t, rb;
+				ulong t, rb_val;
 
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
 			}
 			break;
 		case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		case OP_31_XOP_DCBZ:
 		{
-			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-			ulong ra = 0;
+			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+			ulong ra_val = 0;
 			ulong addr, vaddr;
 			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 			u32 dsisr;
 			int r;
 
-			if (get_ra(inst))
-				ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+			if (ra)
+				ra_val = kvmppc_get_gpr(vcpu, ra);
 
-			addr = (ra + rb) & ~31ULL;
+			addr = (ra_val + rb_val) & ~31ULL;
 			if (!(vcpu->arch.shared->msr & MSR_SF))
 				addr &= 0xffffffff;
 			vaddr = addr;
@@ -565,23 +570,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
 {
 	ulong dar = 0;
-	ulong ra;
+	ulong ra = get_ra(inst);
+	ulong rb = get_rb(inst);
 
 	switch (get_op(inst)) {
 	case OP_LFS:
 	case OP_LFD:
 	case OP_STFD:
 	case OP_STFS:
-		ra = get_ra(inst);
 		if (ra)
 			dar = kvmppc_get_gpr(vcpu, ra);
 		dar += (s32)((s16)inst);
 		break;
 	case 31:
-		ra = get_ra(inst);
 		if (ra)
 			dar = kvmppc_get_gpr(vcpu, ra);
-		dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+		dar += kvmppc_get_gpr(vcpu, rb);
 		break;
 	default:
 		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
-	int rs;
-	int rt;
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
 
 	switch (get_op(inst)) {
 	case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		switch (get_xop(inst)) {
 
 		case OP_31_XOP_MFMSR:
-			rt = get_rt(inst);
 			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
 			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
 			break;
 
 		case OP_31_XOP_MTMSR:
-			rs = get_rs(inst);
 			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
 			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
 			break;
 
 		case OP_31_XOP_WRTEE:
-			rs = get_rs(inst);
 			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
 					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
 			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@@ -86,9 +86,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
-	int ra;
-	int rb;
-	int rt;
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
+	int rt = get_rt(inst);
 
 	switch (get_op(inst)) {
 	case 31:
@@ -96,11 +96,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 #ifdef CONFIG_KVM_E500MC
 		case XOP_MSGSND:
-			emulated = kvmppc_e500_emul_msgsnd(vcpu, get_rb(inst));
+			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
 			break;
 
 		case XOP_MSGCLR:
-			emulated = kvmppc_e500_emul_msgclr(vcpu, get_rb(inst));
+			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
 			break;
 #endif
 
@@ -113,20 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 
 		case XOP_TLBSX:
-			rb = get_rb(inst);
 			emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
 			break;
 
 		case XOP_TLBILX:
-			ra = get_ra(inst);
-			rb = get_rb(inst);
-			rt = get_rt(inst);
 			emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
 			break;
 
 		case XOP_TLBIVAX:
-			ra = get_ra(inst);
-			rb = get_rb(inst);
 			emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
 			break;
 
@@ -148,11 +148,10 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	u32 inst = kvmppc_get_last_inst(vcpu);
-	int ra;
-	int rb;
-	int rs;
-	int rt;
-	int sprn;
+	int ra = get_ra(inst);
+	int rs = get_rs(inst);
+	int rt = get_rt(inst);
+	int sprn = get_sprn(inst);
 	enum emulation_result emulated = EMULATE_DONE;
 	int advance = 1;
 
@@ -189,43 +188,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		advance = 0;
 		break;
 	case OP_31_XOP_LWZX:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;
 
 	case OP_31_XOP_LBZX:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		break;
 
 	case OP_31_XOP_LBZUX:
-		rt = get_rt(inst);
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_31_XOP_STWX:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
 		break;
 
 	case OP_31_XOP_STBX:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
 		break;
 
 	case OP_31_XOP_STBUX:
-		rs = get_rs(inst);
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
@@ -233,28 +220,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_31_XOP_LHAX:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 		break;
 
 	case OP_31_XOP_LHZX:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		break;
 
 	case OP_31_XOP_LHZUX:
-		rt = get_rt(inst);
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_31_XOP_MFSPR:
-		sprn = get_sprn(inst);
-		rt = get_rt(inst);
-
 		switch (sprn) {
 		case SPRN_SRR0:
 			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
@@ -310,20 +288,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_31_XOP_STHX:
-		rs = get_rs(inst);
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
 		break;
 
 	case OP_31_XOP_STHUX:
-		rs = get_rs(inst);
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
@@ -331,8 +301,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_31_XOP_MTSPR:
-		sprn = get_sprn(inst);
-		rs = get_rs(inst);
 		switch (sprn) {
 		case SPRN_SRR0:
 			vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
@@ -384,7 +352,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_31_XOP_LWBRX:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
 		break;
 
@@ -392,25 +359,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_31_XOP_STWBRX:
-		rs = get_rs(inst);
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       4, 0);
 		break;
 
 	case OP_31_XOP_LHBRX:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
 		break;
 
 	case OP_31_XOP_STHBRX:
-		rs = get_rs(inst);
-		ra = get_ra(inst);
-		rb = get_rb(inst);
-
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       2, 0);
@@ -423,39 +381,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_LWZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;
 
 	case OP_LWZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LBZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		break;
 
 	case OP_LBZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STW:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
 		break;
 
 	case OP_STWU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       4, 1);
@@ -463,15 +412,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_STB:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
 		break;
 
 	case OP_STBU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       1, 1);
@@ -479,39 +425,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case OP_LHZ:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		break;
 
 	case OP_LHZU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_LHA:
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 		break;
 
 	case OP_LHAU:
-		ra = get_ra(inst);
-		rt = get_rt(inst);
 		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
 		break;
 
 	case OP_STH:
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);
 		break;
 
 	case OP_STHU:
-		ra = get_ra(inst);
-		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu,
 					       kvmppc_get_gpr(vcpu, rs),
 					       2, 1);