Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-30 09:16:41 +07:00

Commit 0016a4cf55
This extends the emulate_step() function to handle a large proportion of the Book I instructions implemented on current 64-bit server processors. The aim is to handle all the load and store instructions used in the kernel, plus all of the instructions that appear between l[wd]arx and st[wd]cx., so this handles the Altivec/VMX lvx and stvx and the VSX lxvd2x and stxvd2x instructions (implemented in POWER7).

The new code can emulate user-mode instructions, and checks the effective address for a load or store if the saved state is for user mode. It doesn't handle little-endian mode at present.

For floating-point, Altivec/VMX and VSX instructions, it checks that the saved MSR has the enable bit for the relevant facility set, and if so, assumes that the FP/VMX/VSX registers contain valid state, and does loads or stores directly to/from the FP/VMX/VSX registers, using assembly helpers in ldstfp.S.

Instructions supported now include:
* Loads and stores, including some but not all VMX and VSX instructions, and lmw/stmw
* Atomic loads and stores (l[dw]arx, st[dw]cx.)
* Arithmetic instructions (add, subtract, multiply, divide, etc.)
* Compare instructions
* Rotate and mask instructions
* Shift instructions
* Logical instructions (and, or, xor, etc.)
* Condition register logical instructions
* mtcrf, cntlz[wd], exts[bhw]
* isync, sync, lwsync, ptesync, eieio
* Cache operations (dcbf, dcbst, dcbt, dcbtst)

The overflow-checking arithmetic instructions are not included, but they appear never to be used in C code.

This uses decimal values for the minor opcodes in the switch statements because that is what appears in the Power ISA specification, thus it is easier to check that they are correct if they are in decimal.

If this is used to single-step an instruction where a data breakpoint interrupt occurred, then there is the possibility that the instruction is a lwarx or ldarx. In that case we have to be careful not to lose the reservation until we get to the matching st[wd]cx., or we'll never make forward progress. One alternative is to try to arrange that we can return from interrupts and handle data breakpoint interrupts without losing the reservation, which means not using any spinlocks, mutexes, or atomic ops (including bitops). That seems rather fragile. The other alternative is to emulate the larx/stcx and all the instructions in between. This is why this commit adds support for a wide range of integer instructions.

Signed-off-by: Paul Mackerras <paulus@samba.org>
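The decode style the message describes — test the major opcode, switch on the decimal minor opcode, and reject kernel effective addresses when the saved MSR indicates user mode — can be sketched in ordinary C. The toy below is illustrative only: the names (toy_regs, toy_classify, TOY_TASK_SIZE) are invented, it covers just a few opcode-31 X-form loads and stores, and it is not the kernel's emulate_step().

    #include <stdint.h>
    #include <stdio.h>

    struct toy_regs {
            uint64_t gpr[32];
            uint64_t msr;
    };

    #define TOY_MSR_PR     0x4000ULL              /* MSR[PR]: problem (user) state */
    #define TOY_TASK_SIZE  0x0000400000000000ULL  /* made-up user/kernel boundary */

    /* Refuse kernel addresses when the saved MSR says user mode. */
    static int toy_addr_ok(const struct toy_regs *regs, uint64_t ea)
    {
            return !(regs->msr & TOY_MSR_PR) || ea < TOY_TASK_SIZE;
    }

    /*
     * Classify a few opcode-31 X-form loads/stores.  Returns 0 for a
     * load, 1 for a store, -1 if unhandled or the EA check fails; the
     * computed effective address comes back through *ea.
     */
    static int toy_classify(const struct toy_regs *regs, uint32_t instr,
                            uint64_t *ea)
    {
            unsigned int ra = (instr >> 16) & 0x1f;
            unsigned int rb = (instr >> 11) & 0x1f;

            if ((instr >> 26) != 31)        /* major opcode */
                    return -1;

            /* X-form EA: (RA|0) + (RB) */
            *ea = (ra ? regs->gpr[ra] : 0) + regs->gpr[rb];
            if (!toy_addr_ok(regs, *ea))
                    return -1;

            /* Minor opcodes in decimal, matching the Power ISA tables. */
            switch ((instr >> 1) & 0x3ff) {
            case 20:        /* lwarx */
            case 84:        /* ldarx */
            case 23:        /* lwzx */
            case 21:        /* ldx */
                    return 0;
            case 150:       /* stwcx. */
            case 214:       /* stdcx. */
            case 151:       /* stwx */
            case 149:       /* stdx */
                    return 1;
            default:
                    return -1;
            }
    }

    int main(void)
    {
            struct toy_regs regs = { .msr = TOY_MSR_PR };
            uint64_t ea = 0;

            regs.gpr[3] = 0x10000000;
            regs.gpr[4] = 8;

            /* Encode lwarx r5,r3,r4: opcode 31, RT=5, RA=3, RB=4, XO=20. */
            uint32_t lwarx = (31u << 26) | (5u << 21) | (3u << 16) |
                             (4u << 11) | (20u << 1);

            printf("kind=%d ea=0x%llx\n", toy_classify(&regs, lwarx, &ea),
                   (unsigned long long)ea);
            return 0;
    }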
77 lines · 2.1 KiB · C
#ifndef _ASM_POWERPC_ASM_COMPAT_H
#define _ASM_POWERPC_ASM_COMPAT_H

#include <asm/types.h>
#include <asm/ppc-opcode.h>

#ifdef __ASSEMBLY__
#  define stringify_in_c(...)   __VA_ARGS__
#  define ASM_CONST(x)          x
#else
/* This version of stringify will deal with commas... */
#  define __stringify_in_c(...) #__VA_ARGS__
#  define stringify_in_c(...)   __stringify_in_c(__VA_ARGS__) " "
#  define __ASM_CONST(x)        x##UL
#  define ASM_CONST(x)          __ASM_CONST(x)
#endif
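
/*
 * Usage sketch (illustrative; MY_BASE is a hypothetical constant):
 *
 *      #define MY_BASE ASM_CONST(0xc000000000000000)
 *
 * yields 0xc000000000000000UL when compiled as C, but the bare literal
 * when included from assembly, where the UL suffix would not parse.
 * stringify_in_c() likewise passes tokens through untouched in .S files
 * and turns them into string literals (handling embedded commas) for
 * use in C inline asm.
 */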

#ifdef __powerpc64__

/* operations for longs and pointers */
#define PPC_LL          stringify_in_c(ld)
#define PPC_STL         stringify_in_c(std)
#define PPC_STLU        stringify_in_c(stdu)
#define PPC_LCMPI       stringify_in_c(cmpdi)
#define PPC_LONG        stringify_in_c(.llong)
#define PPC_LONG_ALIGN  stringify_in_c(.balign 8)
#define PPC_TLNEI       stringify_in_c(tdnei)
#define PPC_LLARX(t, a, b, eh)  PPC_LDARX(t, a, b, eh)
#define PPC_STLCX       stringify_in_c(stdcx.)
#define PPC_CNTLZL      stringify_in_c(cntlzd)
#define PPC_LR_STKOFF   16
#define PPC_MIN_STKFRM  112
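/*
 * Note: these two values follow the 64-bit PowerPC ELF ABI, where the
 * link register save slot sits at offset 16 from the stack pointer and
 * the minimum stack frame is 112 bytes (48-byte header plus the
 * 64-byte parameter save area).
 */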

/* Move to CR, single-entry optimized version. Only available
 * on POWER4 and later.
 */
#ifdef CONFIG_POWER4_ONLY
#define PPC_MTOCRF      stringify_in_c(mtocrf)
#else
#define PPC_MTOCRF      stringify_in_c(mtcrf)
#endif

#else /* 32-bit */

/* operations for longs and pointers */
#define PPC_LL          stringify_in_c(lwz)
#define PPC_STL         stringify_in_c(stw)
#define PPC_STLU        stringify_in_c(stwu)
#define PPC_LCMPI       stringify_in_c(cmpwi)
#define PPC_LONG        stringify_in_c(.long)
#define PPC_LONG_ALIGN  stringify_in_c(.balign 4)
#define PPC_TLNEI       stringify_in_c(twnei)
#define PPC_LLARX(t, a, b, eh)  PPC_LWARX(t, a, b, eh)
#define PPC_STLCX       stringify_in_c(stwcx.)
#define PPC_CNTLZL      stringify_in_c(cntlzw)
#define PPC_MTOCRF      stringify_in_c(mtcrf)
#define PPC_LR_STKOFF   4
#define PPC_MIN_STKFRM  16

#endif
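
/*
 * Usage sketch (illustrative; tmp and ptr are hypothetical): inline asm
 * written with these macros assembles to the doubleword forms (ld/std)
 * on 64-bit and the word forms (lwz/stw) on 32-bit, so one sequence
 * covers both word sizes:
 *
 *      asm volatile(PPC_LL " %0,0(%1)" : "=r" (tmp) : "b" (ptr));
 *      asm volatile(PPC_STL " %0,0(%1)" : : "r" (tmp), "b" (ptr) : "memory");
 */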

#ifdef __KERNEL__
#ifdef CONFIG_IBM405_ERR77
/* Erratum #77 on the 405 means we need a sync or dcbt before every
 * stwcx.  The old ATOMIC_SYNC_FIX covered some but not all of this.
 */
#define PPC405_ERR77(ra,rb)     stringify_in_c(dcbt ra, rb;)
#define PPC405_ERR77_SYNC       stringify_in_c(sync;)
#else
#define PPC405_ERR77(ra,rb)
#define PPC405_ERR77_SYNC
#endif
#endif
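
/*
 * Usage sketch: the workaround drops into lwarx/stwcx. loops right
 * before the store-conditional, for example (operand numbers are
 * illustrative):
 *
 *      "1:     lwarx   %0,0,%2\n"
 *      "       add     %0,%1,%0\n"
 *      PPC405_ERR77(0, %2)
 *      "       stwcx.  %0,0,%2\n"
 *      "       bne-    1b"
 *
 * and expands to nothing on configurations without the erratum.
 */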

#endif /* _ASM_POWERPC_ASM_COMPAT_H */