/*
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Provides masks and opcode images for use by code generation and
 * emulation, and for instructions that older assemblers might not
 * know about.
 */
#ifndef _ASM_POWERPC_PPC_OPCODE_H
#define _ASM_POWERPC_PPC_OPCODE_H

#include <linux/stringify.h>
#include <asm/asm-compat.h>
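
/*
 * Editorial note: the __REG_Rn values below are bare register numbers for
 * the token-pasting wrappers defined further down, e.g. __PPC_RA(R3)
 * expands to ___PPC_RA(__REG_R3), i.e. ((3 & 0x1f) << 16) == 0x00030000.
 */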

#define __REG_R0 0
#define __REG_R1 1
#define __REG_R2 2
#define __REG_R3 3
#define __REG_R4 4
#define __REG_R5 5
#define __REG_R6 6
#define __REG_R7 7
#define __REG_R8 8
#define __REG_R9 9
#define __REG_R10 10
#define __REG_R11 11
#define __REG_R12 12
#define __REG_R13 13
#define __REG_R14 14
#define __REG_R15 15
#define __REG_R16 16
#define __REG_R17 17
#define __REG_R18 18
#define __REG_R19 19
#define __REG_R20 20
#define __REG_R21 21
#define __REG_R22 22
#define __REG_R23 23
#define __REG_R24 24
#define __REG_R25 25
#define __REG_R26 26
#define __REG_R27 27
#define __REG_R28 28
#define __REG_R29 29
#define __REG_R30 30
#define __REG_R31 31
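
/*
 * Editorial note: __REGA0_0 below deliberately has no "R", which appears
 * intended. In RA slots the Power ISA reads a value of 0 as a literal zero
 * rather than GPR0, so the __PPC_RA0() wrapper further down accepts 0 or
 * R1-R31, and an attempt to pass R0 fails to expand.
 */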

#define __REGA0_0 0
#define __REGA0_R1 1
#define __REGA0_R2 2
#define __REGA0_R3 3
#define __REGA0_R4 4
#define __REGA0_R5 5
#define __REGA0_R6 6
#define __REGA0_R7 7
#define __REGA0_R8 8
#define __REGA0_R9 9
#define __REGA0_R10 10
#define __REGA0_R11 11
#define __REGA0_R12 12
#define __REGA0_R13 13
#define __REGA0_R14 14
#define __REGA0_R15 15
#define __REGA0_R16 16
#define __REGA0_R17 17
#define __REGA0_R18 18
#define __REGA0_R19 19
#define __REGA0_R20 20
#define __REGA0_R21 21
#define __REGA0_R22 22
#define __REGA0_R23 23
#define __REGA0_R24 24
#define __REGA0_R25 25
#define __REGA0_R26 26
#define __REGA0_R27 27
#define __REGA0_R28 28
#define __REGA0_R29 29
#define __REGA0_R30 30
#define __REGA0_R31 31

/* sorted alphabetically */
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DCBAL 0x7c2005ec
#define PPC_INST_DCBZL 0x7c2007ec
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_LXVD2X 0x7c000698
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_POPCNTD 0x7c0003f4
#define PPC_INST_POPCNTW 0x7c0002f4
#define PPC_INST_RFCI 0x4c000066
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
#define PPC_INST_SLBFEE 0x7c0007a7

#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e

#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_STXVD2X 0x7c000798
#define PPC_INST_TLBIE 0x7c000264
#define PPC_INST_TLBILX 0x7c000024
#define PPC_INST_TLBIVAX 0x7c000624
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_WAIT 0x7c00007c
#define PPC_INST_XXLOR 0xf0000510

#define PPC_INST_NAP 0x4c000364
#define PPC_INST_SLEEP 0x4c0003a4
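
/*
 * Editorial sketch of how the *_MASK pairs above are typically consumed:
 * masking an instruction word and comparing it against the base image
 * classifies a family of encodings at once, e.g. all four string
 * instructions (lswi, lswx, stswi, stswx):
 *
 *	if ((instr & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING)
 *		return emulate_string_inst(regs, instr);
 *
 * (emulate_string_inst() is named here only for illustration.)
 */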

/* A2 specific instructions */
#define PPC_INST_ERATWE 0x7c0001a6
#define PPC_INST_ERATRE 0x7c000166
#define PPC_INST_ERATILX 0x7c000066
#define PPC_INST_ERATIVAX 0x7c000666
#define PPC_INST_ERATSX 0x7c000126
#define PPC_INST_ERATSX_DOT 0x7c000127

/* Misc instructions for BPF compiler */
#define PPC_INST_LD 0xe8000000
#define PPC_INST_LHZ 0xa0000000
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_MFLR 0x7c0802a6
#define PPC_INST_MTLR 0x7c0803a6
#define PPC_INST_CMPWI 0x2c000000
#define PPC_INST_CMPDI 0x2c200000
#define PPC_INST_CMPLW 0x7c000040
#define PPC_INST_CMPLWI 0x28000000
#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
#define PPC_INST_SUB 0x7c000050
#define PPC_INST_BLR 0x4e800020
#define PPC_INST_BLRL 0x4e800021
#define PPC_INST_MULLW 0x7c0001d6
#define PPC_INST_MULHWU 0x7c000016
#define PPC_INST_MULLI 0x1c000000
#define PPC_INST_DIVWU 0x7c0003d6
#define PPC_INST_RLWINM 0x54000000
#define PPC_INST_RLDICR 0x78000004
#define PPC_INST_SLW 0x7c000030
#define PPC_INST_SRW 0x7c000430
#define PPC_INST_AND 0x7c000038
#define PPC_INST_ANDDOT 0x7c000039
#define PPC_INST_OR 0x7c000378
#define PPC_INST_ANDI 0x70000000
#define PPC_INST_ORI 0x60000000
#define PPC_INST_ORIS 0x64000000
#define PPC_INST_NEG 0x7c0000d0
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BRANCH_COND 0x40800000
#define PPC_INST_LBZCIX 0x7c0006aa
#define PPC_INST_STBCIX 0x7c0007aa
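
/*
 * Editorial sketch: a JIT builds instruction words by OR-ing the field
 * macros defined below into these raw images, e.g. "addi r3,r1,16" can
 * be formed as
 *
 *	PPC_INST_ADDI | ___PPC_RT(3) | ___PPC_RA(1) | (16 & 0xffff)
 *
 * which evaluates to 0x38610010.
 */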

/* macros to insert fields into opcodes */
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
#define ___PPC_RB(b) (((b) & 0x1f) << 11)
#define ___PPC_RS(s) (((s) & 0x1f) << 21)
#define ___PPC_RT(t) ___PPC_RS(t)
#define __PPC_RA(a) ___PPC_RA(__REG_##a)
#define __PPC_RA0(a) ___PPC_RA(__REGA0_##a)
#define __PPC_RB(b) ___PPC_RB(__REG_##b)
#define __PPC_RS(s) ___PPC_RS(__REG_##s)
#define __PPC_RT(t) ___PPC_RT(__REG_##t)
#define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define __PPC_XT(s) __PPC_XS(s)
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
#define __PPC_WC(w) (((w) & 0x3) << 21)
#define __PPC_WS(w) (((w) & 0x1f) << 11)
#define __PPC_SH(s) __PPC_WS(s)
#define __PPC_MB(s) (((s) & 0x1f) << 6)
#define __PPC_ME(s) (((s) & 0x1f) << 1)
#define __PPC_BI(s) (((s) & 0x1f) << 16)
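
/*
 * Worked example (editorial): combining a raw image with the field macros
 * above, "add r3,r4,r5" encodes as
 *
 *	PPC_INST_ADD | ___PPC_RT(3) | ___PPC_RA(4) | ___PPC_RB(5)
 *
 * i.e. 0x7c000214 | 0x00600000 | 0x00040000 | 0x00002800 == 0x7c642a14.
 */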

/*
 * Only use the larx hint bit on 64-bit CPUs. e500v1/v2 based CPUs will
 * treat a larx with EH set as an illegal instruction.
 */
#ifdef CONFIG_PPC64
#define __PPC_EH(eh) (((eh) & 0x1) << 0)
#else
#define __PPC_EH(eh) 0
#endif

/* Deal with instructions that older assemblers aren't aware of */
#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
					__PPC_RA(a) | __PPC_RB(b))
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
					__PPC_RA(a) | __PPC_RB(b))
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
					___PPC_RT(t) | ___PPC_RA(a) | \
					___PPC_RB(b) | __PPC_EH(eh))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
					___PPC_RT(t) | ___PPC_RA(a) | \
					___PPC_RB(b) | __PPC_EH(eh))
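
/*
 * Usage sketch (editorial): the macros above expand to an asm string, so
 * callers can splice them into inline assembly; eh only reaches the
 * encoding on CONFIG_PPC64 builds, so the same source stays legal on
 * e500v1/v2. A minimal atomic-exchange style loop might look like:
 *
 *	__asm__ __volatile__(
 *	"1:	" PPC_LWARX(%0, 0, %2, 1) "\n"
 *	"	stwcx.	%1,0,%2\n"
 *	"	bne-	1b"
 *	: "=&r" (old) : "r" (new), "r" (p) : "cc", "memory");
 */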

#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
					___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
					__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
					__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_INST_POPCNTW | \
					__PPC_RA(a) | __PPC_RS(s))
#define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI)
#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \
					__PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \
					__PPC_WC(w))
#define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_INST_TLBIE | \
					___PPC_RB(a) | ___PPC_RS(lp))
#define PPC_TLBSRX_DOT(a, b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
					__PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBIVAX(a, b) stringify_in_c(.long PPC_INST_TLBIVAX | \
					__PPC_RA0(a) | __PPC_RB(b))
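
/*
 * Usage sketch (editorial): since these go through __PPC_RA0()/__PPC_RB(),
 * they take symbolic register arguments, e.g. in an assembly file one
 * might write
 *
 *	PPC_TLBILX_VA(0, R3)
 *
 * to invalidate the TLB entry for the effective address in r3 (an RA
 * argument of 0 means "no base register").
 */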

#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \
					__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \
					__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \
					__PPC_T_TLB(t) | __PPC_RA0(a) | \
					__PPC_RB(b))
#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \
					__PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_ERATSX(t, a, b) stringify_in_c(.long PPC_INST_ERATSX | \
					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_ERATSX_DOT(t, a, b) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))

#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
					__PPC_RT(t) | __PPC_RB(b))

/* PASemi instructions */
#define LBZCIX(t, a, b) stringify_in_c(.long PPC_INST_LBZCIX | \
					__PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
#define STBCIX(s, a, b) stringify_in_c(.long PPC_INST_STBCIX | \
					__PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))

/*
 * Define what the VSX XX1 form instructions will look like, then add
 * the 128-bit load/store instructions based on that.
 */
#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \
					VSX_XX1((s), a, b))
#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
					VSX_XX1((s), a, b))
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
					VSX_XX3((t), a, b))
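
/*
 * Editorial note: VSX registers are numbered 0-63, so __PPC_XS() splits
 * the 6-bit number: bits 0-4 land in the usual RS field and bit 5 drops
 * into the instruction's low-order SX bit, e.g.
 *
 *	__PPC_XS(32) == ((32 & 0x1f) << 21) | ((32 & 0x20) >> 5) == 0x1
 *
 * which is how STXVD2X/LXVD2X address the full 64-register VSX file.
 */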

#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)

#endif /* _ASM_POWERPC_PPC_OPCODE_H */