56fb4df6da
UltraSPARC has special sets of global registers which are switched to for certain trap types. There is one set for MMU related traps, one set for Interrupt Vector processing, and another set (called the Alternate globals) for all other trap types.

For what seems like forever we've hard-coded the values in some of these trap registers. Some examples include:

1) Interrupt Vector global %g6 holds the current processor's interrupt work struct, where received interrupts are managed for IRQ handler dispatch.

2) MMU global %g7 holds the base of the page tables of the currently active address space.

3) Alternate global %g6 held the current_thread_info() value.

Such hard-coding has resulted in serious issues in many areas. There are code sequences where having another register available would help clean up the implementation. Taking traps such as cross-calls from the OBP firmware requires tricky code sequences wherein we have to save away and restore all of the special sets of global registers when we enter/exit OBP. We were also using the IMMU TSB register on SMP to hold the per-cpu area base address, which no longer works now that we actually use the TSB facility of the cpu.

The implementation is pretty straightforward. One tricky bit is getting the current processor ID, as that is different on different cpu variants. We use a stub with a fancy calling convention which we patch at boot time. The calling convention is that the stub is branched to and the (PC - 4) to return to is in register %g1. The cpu number is left in %g6. This stub can be invoked by using the __GET_CPUID macro.

We use an array of per-cpu trap state to store the current thread and the physical address of the current address space's page tables. The TRAP_LOAD_THREAD_REG macro loads %g6 with the current thread from this table; it uses __GET_CPUID and also clobbers %g1. TRAP_LOAD_IRQ_WORK is used by the interrupt vector processing to load the current processor's IRQ software state into %g6; it also uses __GET_CPUID and clobbers %g1. Finally, TRAP_LOAD_PGD_PHYS loads the physical address base of the current address space's page tables into %g7; it clobbers %g1 and uses __GET_CPUID.

Many refinements are possible, as well as some tuning, with this stuff in place.

Signed-off-by: David S. Miller <davem@davemloft.net>
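Below is a minimal sketch, in SPARC assembly, of the %g1/%g6 calling convention described above and of how a TRAP_LOAD_PGD_PHYS-style sequence could be built on it. Only the convention itself and the trap_block / TRAP_BLOCK_SZ_SHIFT / TRAP_PER_CPU_PGD_PADDR symbols (used by tsb_context_switch in the file below) come from this patch; the stub name __cpuid_stub, the ba/rd expansion of __GET_CPUID, and the Spitfire-style UPA config read are illustrative assumptions, not the stubs actually installed at boot.

        /* Boot-patchable stub sketch (illustrative): the caller branches
         * here with (return PC - 4) in %g1; the cpu number is left in
         * %g6 and we jump back via %g1 + 4.
         */
__cpuid_stub:
        ldxa    [%g0] ASI_UPA_CONFIG, %g6       ! Spitfire-style example read
        srlx    %g6, 17, %g6
        jmpl    %g1 + 0x4, %g0
         and    %g6, 0x1f, %g6                  ! cpu id now in %g6

        /* Sketch of a TRAP_LOAD_PGD_PHYS-style expansion: get the cpu id
         * (the ba/rd pair is one way __GET_CPUID might expand), index the
         * per-cpu trap_block array, and load the current address space's
         * page table physical base into %g7.  It clobbers %g1 and
         * scratches %g6, matching the commit text.
         */
        ba,pt   %xcc, __cpuid_stub
         rd     %pc, %g1                        ! %g1 = return PC - 4
        sethi   %hi(trap_block), %g7
        or      %g7, %lo(trap_block), %g7
        sllx    %g6, TRAP_BLOCK_SZ_SHIFT, %g6
        add     %g7, %g6, %g6
        ldx     [%g6 + TRAP_PER_CPU_PGD_PADDR], %g7

The tsb.S file below is one consumer of this machinery: the TSB miss path invokes TRAP_LOAD_PGD_PHYS (preserving %g1 and %g6 around it), and tsb_context_switch indexes trap_block directly to update TRAP_PER_CPU_PGD_PADDR.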
/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <asm/tsb.h>

        .text
        .align  32

        /* Invoked from TLB miss handler, we are in the
         * MMU global registers and they are setup like
         * this:
         *
         * %g1: TSB entry pointer
         * %g2: available temporary
         * %g3: FAULT_CODE_{D,I}TLB
         * %g4: available temporary
         * %g5: available temporary
         * %g6: TAG TARGET
         * %g7: physical address base of the linux page
         *      tables for the current address space
         */
        .globl          tsb_miss_dtlb
tsb_miss_dtlb:
        mov             TLB_TAG_ACCESS, %g4
        ldxa            [%g4] ASI_DMMU, %g4
        ba,pt           %xcc, tsb_miss_page_table_walk
         nop

        .globl          tsb_miss_itlb
tsb_miss_itlb:
        mov             TLB_TAG_ACCESS, %g4
        ldxa            [%g4] ASI_IMMU, %g4
        ba,pt           %xcc, tsb_miss_page_table_walk
         nop

tsb_miss_page_table_walk:
        /* This clobbers %g1 and %g6, preserve them... */
        mov             %g1, %g5
        mov             %g6, %g2

        TRAP_LOAD_PGD_PHYS

        mov             %g2, %g6
        mov             %g5, %g1

        USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

tsb_reload:
        TSB_LOCK_TAG(%g1, %g2, %g4)

        /* Load and check PTE.  */
        ldxa            [%g5] ASI_PHYS_USE_EC, %g5
        brgez,a,pn      %g5, tsb_do_fault
         stx            %g0, [%g1]

        TSB_WRITE(%g1, %g5, %g6)

        /* Finally, load TLB and return from trap.  */
tsb_tlb_reload:
        cmp             %g3, FAULT_CODE_DTLB
        bne,pn          %xcc, tsb_itlb_load
         nop

tsb_dtlb_load:
        stxa            %g5, [%g0] ASI_DTLB_DATA_IN
        retry

tsb_itlb_load:
        stxa            %g5, [%g0] ASI_ITLB_DATA_IN
        retry

        /* No valid entry in the page tables, do full fault
         * processing.
         */
        .globl          tsb_do_fault
tsb_do_fault:
        cmp             %g3, FAULT_CODE_DTLB
        rdpr            %pstate, %g5
        bne,pn          %xcc, tsb_do_itlb_fault
         wrpr           %g5, PSTATE_AG | PSTATE_MG, %pstate

tsb_do_dtlb_fault:
        rdpr            %tl, %g4
        cmp             %g4, 1
        mov             TLB_TAG_ACCESS, %g4
        ldxa            [%g4] ASI_DMMU, %g5
        be,pt           %xcc, sparc64_realfault_common
         mov            FAULT_CODE_DTLB, %g4
        ba,pt           %xcc, winfix_trampoline
         nop

tsb_do_itlb_fault:
        rdpr            %tpc, %g5
        ba,pt           %xcc, sparc64_realfault_common
         mov            FAULT_CODE_ITLB, %g4

        .globl          sparc64_realfault_common
sparc64_realfault_common:
        stb             %g4, [%g6 + TI_FAULT_CODE]      ! Save fault code
        stx             %g5, [%g6 + TI_FAULT_ADDR]      ! Save fault address
        ba,pt           %xcc, etrap                     ! Save trap state
1:       rd             %pc, %g7                        ! ...
        call            do_sparc64_fault                ! Call fault handler
         add            %sp, PTREGS_OFF, %o0            ! Compute pt_regs arg
        ba,pt           %xcc, rtrap_clr_l6              ! Restore cpu state
         nop                                            ! Delay slot (fill me)

        .globl          winfix_trampoline
winfix_trampoline:
        rdpr            %tpc, %g3                       ! Prepare winfixup TNPC
        or              %g3, 0x7c, %g3                  ! Compute branch offset
        wrpr            %g3, %tnpc                      ! Write it into TNPC
        done                                            ! Trap return

        /* Reload MMU related context switch state at
         * schedule() time.
         *
         * %o0: page table physical address
         * %o1: TSB address
         */
        .align  32
        .globl  tsb_context_switch
tsb_context_switch:
        rdpr    %pstate, %o5
        wrpr    %o5, PSTATE_IE, %pstate

        ldub    [%g6 + TI_CPU], %o3
        sethi   %hi(trap_block), %o4
        sllx    %o3, TRAP_BLOCK_SZ_SHIFT, %o3
        or      %o4, %lo(trap_block), %o4
        add     %o4, %o3, %o4
        stx     %o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]

        brgez   %o1, 9f
         nop

        /* Lock TSB into D-TLB.  */
        sethi   %hi(PAGE_SIZE), %o3
        and     %o3, %o1, %o3
        sethi   %hi(TSBMAP_BASE), %o2
        add     %o2, %o3, %o2

        /* XXX handle PAGE_SIZE != 8K correctly...  */
        mov     TSB_REG, %g1
        stxa    %o2, [%g1] ASI_DMMU
        membar  #Sync

        stxa    %o2, [%g1] ASI_IMMU
        membar  #Sync

#define KERN_HIGHBITS   ((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
#define KERN_LOWBITS    (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
        sethi           %uhi(KERN_HIGHBITS), %g2
        or              %g2, %ulo(KERN_HIGHBITS), %g2
        sllx            %g2, 32, %g2
        or              %g2, KERN_LOWBITS, %g2
#undef KERN_HIGHBITS
#undef KERN_LOWBITS

        xor     %o1, %g2, %o1

        /* We use entry 61 for this locked entry.  This is the spitfire
         * TLB entry number, and luckily cheetah masks the value with
         * 15 ending us up with entry 13 which is what we want in that
         * case too.
         *
         * XXX Interactions with prom_world()...
         */
        mov     TLB_TAG_ACCESS, %g1
        stxa    %o2, [%g1] ASI_DMMU
        membar  #Sync
        mov     (61 << 3), %g1
        stxa    %o1, [%g1] ASI_DTLB_DATA_ACCESS
        membar  #Sync

9:
        wrpr    %o5, %pstate

        retl
         mov    %o2, %o0
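For context, a hedged sketch of how a caller might hand the new address space's state to tsb_context_switch, per the %o0/%o1 convention documented above. The %l1/%l2 source registers and the call site are purely illustrative; this is not taken from the kernel's actual context-switch code.

        /* Illustrative caller only: per the convention above, %o0 is the
         * physical base of the new mm's page tables and %o1 is its TSB
         * address.  The brgez in tsb_context_switch skips the D-TLB
         * locking step when %o1 is non-negative.
         */
        mov     %l1, %o0                ! example source register
        call    tsb_context_switch
         mov    %l2, %o1                ! example source register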