/*
 * File:	mca_asm.S
 * Purpose:	assembly portion of the IA64 MCA handling
 *
 * Mods by cfleck to integrate into kernel build
 *
 * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
 *		Added various stop bits to get a clean compile
 *
 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
 *		Added code to save INIT handoff state in pt_regs format,
 *		switch to temp kstack, switch modes, jump to C INIT handler
 *
 * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
 *		Before entering virtual mode code:
 *		1. Check for TLB CPU error
 *		2. Restore current thread pointer to kr6
 *		3. Move stack ptr 16 bytes to conform to C calling convention
 *
 * 2004-11-12 Russ Anderson <rja@sgi.com>
 *		Added per cpu MCA/INIT stack save areas.
 *
 * 2005-12-08 Keith Owens <kaos@sgi.com>
 *		Use per cpu MCA/INIT stacks for all data.
 */
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/threads.h>
|
|
|
|
|
|
|
|
#include <asm/asmmacro.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/mca_asm.h>
|
|
|
|
#include <asm/mca.h>
|
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
#include "entry.h"
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
 * GET_IA64_MCA_DATA(reg): load into 'reg' the physical address of this
 * cpu's MCA/INIT save area (loads the value of the per-cpu variable
 * ia64_mca_data via its physical address, so it is usable while still
 * running in physical mode).  Clobbers only 'reg'; ends with a stop bit.
 */
#define GET_IA64_MCA_DATA(reg)						\
	GET_THIS_PADDR(reg, ia64_mca_data)				\
	;;								\
	ld8 reg=[reg]
|
|
|
|
|
2005-04-23 04:44:40 +07:00
|
|
|
	// Entry points exported from this file.  The dispatch/handler
	// addresses are registered with SAL (see the use of these symbols
	// elsewhere in the ia64 MCA code); ia64_do_tlb_purge is also reused
	// by the cpu hotplug code.
	.global ia64_do_tlb_purge
	.global ia64_os_mca_dispatch
	.global ia64_os_init_on_kdump
	.global ia64_os_init_dispatch_monarch
	.global ia64_os_init_dispatch_slave

	.text
	.align 16
|
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
//StartMain////////////////////////////////////////////////////////////////////

/*
 * Just the TLB purge part is moved to a separate function
 * so we can re-use the code for cpu hotplug code as well
 * Caller should now setup b1, so we can branch once the
 * tlb flush is complete.
 *
 * Clobbers: r2, r16-r22, r24, ar.lc, p6/p7.  Must be entered in
 * physical mode.  Note: step "2." is absent from the comment numbering
 * below; the numbering is kept to match the matching reload steps in
 * ia64_reload_tr.
 */

ia64_do_tlb_purge:
#define O(member)	IA64_CPUINFO_##member##_OFFSET

	// Fetch the local purge parameters (ptc.e base/count/stride pairs)
	// from this cpu's cpuinfo, accessed by physical address.
	GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
	;;
	addl r17=O(PTCE_STRIDE),r2
	addl r2=O(PTCE_BASE),r2
	;;
	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
	ld4 r19=[r2],4					// r19=ptce_count[0]
	ld4 r21=[r17],4					// r21=ptce_stride[0]
	;;
	ld4 r20=[r2]					// r20=ptce_count[1]
	ld4 r22=[r17]					// r22=ptce_stride[1]
	mov r24=0					// r24 = outer loop index
	;;
	adds r20=-1,r20					// inner trip count for ar.lc
	;;
#undef O
2:
	// Outer loop: r24 counts 0 .. ptce_count[0]-1.
	cmp.ltu p6,p7=r24,r19
(p7)	br.cond.dpnt.few 4f
	mov ar.lc=r20
3:
	// Inner loop: ptc.e at each stride[1] step, ptce_count[1] times.
	ptc.e r18
	;;
	add r18=r22,r18
	br.cloop.sptk.few 3b
	;;
	add r18=r21,r18					// advance by stride[0]
	add r24=1,r24
	;;
	br.sptk.few 2b
4:
	srlz.i 			// srlz.i implies srlz.d
	;;

	// Now purge addresses formerly mapped by TR registers
	// 1. Purge ITR&DTR for kernel.
	movl r16=KERNEL_START
	mov r18=KERNEL_TR_PAGE_SHIFT<<2
	;;
	ptr.i r16, r18
	ptr.d r16, r18
	;;
	srlz.i
	;;
	srlz.d
	;;
	// 3. Purge ITR for PAL code.
	GET_THIS_PADDR(r2, ia64_mca_pal_base)
	;;
	ld8 r16=[r2]
	mov r18=IA64_GRANULE_SHIFT<<2
	;;
	ptr.i r16,r18
	;;
	srlz.i
	;;
	// 4. Purge DTR for stack.
	mov r16=IA64_KR(CURRENT_STACK)
	;;
	shl r16=r16,IA64_GRANULE_SHIFT
	movl r19=PAGE_OFFSET
	;;
	add r16=r19,r16
	mov r18=IA64_GRANULE_SHIFT<<2
	;;
	ptr.d r16,r18
	;;
	srlz.i
	;;
	// Now branch away to caller.
	br.sptk.many b1
	;;

//EndMain//////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
//StartMain////////////////////////////////////////////////////////////////////

/*
 * SAL to OS entry point for MCA.  Runs in physical mode on the per-cpu
 * MCA stack: save the non-minstate state, optionally purge+reload the
 * TLB (when the processor state parameter indicates a TLB check, bit 60),
 * switch stacks, enter virtual mode, call the C handler ia64_mca_handler,
 * then unwind everything and return to SAL via the address in r12.
 */
ia64_os_mca_dispatch:
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	mov r19=1				// All MCA events are treated as monarch (for now)
	br.sptk ia64_state_save			// save the state that is not in minstate
1:

	GET_IA64_MCA_DATA(r2)
	// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
	;;
	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
	;;
	ld8 r18=[r3]				// Get processor state parameter on existing PALE_CHECK.
	;;
	tbit.nz p6,p7=r18,60			// bit 60: TLB check indicated?
(p7)	br.spnt done_tlb_purge_and_reload	// no TLB error - skip purge/reload

	// The following code purges TC and TR entries. Then reload all TC entries.
	// Purge percpu data TC entries.
begin_tlb_purge_and_reload:
	// ia64_do_tlb_purge returns via b1; point b1 at ia64_reload_tr
	// (physical address) before branching to it.
	movl r18=ia64_reload_tr;;
	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
	mov b1=r18;;
	br.sptk.many ia64_do_tlb_purge;;

ia64_reload_tr:
	// Finally reload the TR registers.
	// 1. Reload DTR/ITR registers for kernel.
	mov r18=KERNEL_TR_PAGE_SHIFT<<2
	movl r17=KERNEL_START
	;;
	mov cr.itir=r18
	mov cr.ifa=r17
	mov r16=IA64_TR_KERNEL
	mov r19=ip
	movl r18=PAGE_KERNEL
	;;
	dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT	// page-align current ip for the PTE ppn
	;;
	or r18=r17,r18				// PTE = page frame | PAGE_KERNEL attributes
	;;
	itr.i itr[r16]=r18
	;;
	itr.d dtr[r16]=r18
	;;
	srlz.i
	srlz.d
	;;
	// 3. Reload ITR for PAL code.
	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
	;;
	ld8 r18=[r2]			// load PAL PTE
	;;
	GET_THIS_PADDR(r2, ia64_mca_pal_base)
	;;
	ld8 r16=[r2]			// load PAL vaddr
	mov r19=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r19
	mov cr.ifa=r16
	mov r20=IA64_TR_PALCODE
	;;
	itr.i itr[r20]=r18
	;;
	srlz.i
	;;
	// 4. Reload DTR for stack.
	mov r16=IA64_KR(CURRENT_STACK)
	;;
	shl r16=r16,IA64_GRANULE_SHIFT
	movl r19=PAGE_OFFSET
	;;
	add r18=r19,r16			// r18 = virtual address of current stack
	movl r20=PAGE_KERNEL
	;;
	add r16=r20,r16			// r16 = PTE for current stack
	mov r19=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r19
	mov cr.ifa=r18
	mov r20=IA64_TR_CURRENT_STACK
	;;
	itr.d dtr[r20]=r16
	GET_THIS_PADDR(r2, ia64_mca_tr_reload)
	mov r18 = 1
	;;
	srlz.d
	;;
	st8 [r2] =r18			// flag to C code that the TRs were reloaded
	;;

done_tlb_purge_and_reload:

	// switch to per cpu MCA stack
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_new_stack
1:

	// everything saved, now we can set the kernel registers
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_set_kernel_registers
1:

	// This must be done in physical mode
	GET_IA64_MCA_DATA(r2)
	;;
	mov r7=r2				// stash the MCA data paddr across the mode switch

	// Enter virtual mode from physical mode
	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)

	// This code returns to SAL via SOS r2, in general SAL has no unwind
	// data.  To get a clean termination when backtracing the C MCA/INIT
	// handler, set a dummy return address of 0 in this routine.  That
	// requires that ia64_os_mca_virtual_begin be a global function.
ENTRY(ia64_os_mca_virtual_begin)
	.prologue
	.save rp,r0
	.body

	mov ar.rsc=3				// set eager mode for C handler
	mov r2=r7				// see GET_IA64_MCA_DATA above
	;;

	// Call virtual mode handler
	// ia64_mca_handler(pt_regs *, switch_stack *, sal_os_state *)
	alloc r14=ar.pfs,0,0,3,0
	;;
	DATA_PA_TO_VA(r2,r7)
	;;
	add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
	add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
	add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
	br.call.sptk.many    b0=ia64_mca_handler

	// Revert back to physical mode before going back to SAL
	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
ia64_os_mca_virtual_end:

END(ia64_os_mca_virtual_begin)

	// switch back to previous stack
	alloc r14=ar.pfs,0,0,0,0		// remove the MCA handler frame
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_old_stack
1:

	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_state_restore		// restore the SAL state
1:

	mov		b0=r12			// SAL_CHECK return address

	br		b0

//EndMain//////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
//StartMain////////////////////////////////////////////////////////////////////

//
// NOP init handler for kdump.  In panic situation, we may receive INIT
// while kernel transition.  Since we initialize registers on leave from
// current kernel, no longer monarch/slave handlers of current kernel in
// virtual mode are called safely.
// We can unregister these init handlers from SAL, however then the INIT
// will result in warmboot by SAL and we cannot retrieve the crashdump.
// Therefore register this NOP function to SAL, to prevent entering virtual
// mode and resulting warmboot by SAL.
//
// Sets up the SAL handoff return values (resume same context) and
// returns straight back to SAL via the SAL_CHECK return address in r12.
//
ia64_os_init_on_kdump:
	mov		r8=r0		// IA64_INIT_RESUME
	mov		r9=r10		// SAL_GP
	mov		r22=r17		// *minstate
	;;
	mov		r10=r0		// return to same context
	mov		b0=r12		// SAL_CHECK return address
	br		b0
2005-09-11 14:22:53 +07:00
|
|
|
//
// SAL to OS entry point for INIT on all processors.  This has been defined for
// registration purposes with SAL as a part of ia64_mca_init.  Monarch and
// slave INIT have identical processing, except for the value of the
// sos->monarch flag in r19.
//
// Flow mirrors ia64_os_mca_dispatch, but uses the per-cpu INIT stack and
// calls the C handler ia64_init_handler.  No TLB purge/reload is done here.
//

ia64_os_init_dispatch_monarch:
	mov r19=1				// Bow, bow, ye lower middle classes!
	br.sptk ia64_os_init_dispatch

ia64_os_init_dispatch_slave:
	mov r19=0				// <igor>yeth, mathter</igor>
	// fall through into the common dispatch below

ia64_os_init_dispatch:

	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_state_save			// save the state that is not in minstate
1:

	// switch to per cpu INIT stack
	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_new_stack
1:

	// everything saved, now we can set the kernel registers
	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_set_kernel_registers
1:

	// This must be done in physical mode
	GET_IA64_MCA_DATA(r2)
	;;
	mov r7=r2				// stash the MCA data paddr across the mode switch

	// Enter virtual mode from physical mode
	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)

	// This code returns to SAL via SOS r2, in general SAL has no unwind
	// data.  To get a clean termination when backtracing the C MCA/INIT
	// handler, set a dummy return address of 0 in this routine.  That
	// requires that ia64_os_init_virtual_begin be a global function.
ENTRY(ia64_os_init_virtual_begin)
	.prologue
	.save rp,r0
	.body

	mov ar.rsc=3				// set eager mode for C handler
	mov r2=r7				// see GET_IA64_MCA_DATA above
	;;

	// Call virtual mode handler
	// ia64_init_handler(pt_regs *, switch_stack *, sal_os_state *)
	alloc r14=ar.pfs,0,0,3,0
	;;
	DATA_PA_TO_VA(r2,r7)
	;;
	add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
	add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
	add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
	br.call.sptk.many    b0=ia64_init_handler

	// Revert back to physical mode before going back to SAL
	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
ia64_os_init_virtual_end:

END(ia64_os_init_virtual_begin)

	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_state_restore		// restore the SAL state
1:

	// switch back to previous stack
	alloc r14=ar.pfs,0,0,0,0		// remove the INIT handler frame
	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_old_stack
1:

	mov		b0=r12			// SAL_CHECK return address

	br		b0

//EndMain//////////////////////////////////////////////////////////////////////
|
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
// common defines for the stubs
// Register aliases used by the ia64_state_save/restore and stack-switch
// stubs below.  temp1/temp2 alias r2/r3, which also carry the stubs'
// inputs (return address and stack offset), hence the warnings.
#define	ms		r4
#define	regs		r5
#define	temp1		r2	/* careful, it overlaps with input registers */
#define	temp2		r3	/* careful, it overlaps with input registers */
#define	temp3		r7
#define	temp4		r14
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
//++
// Name:
//	ia64_state_save()
//
// Stub Description:
//
//	Save the state that is not in minstate.  This is sensitive to the layout of
//	struct ia64_sal_os_state in mca.h.
//
//	r2 contains the return address, r3 contains either
//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
//
//	The OS to SAL section of struct ia64_sal_os_state is set to a default
//	value of cold boot (MCA) or warm boot (INIT) and return to the same
//	context.  ia64_sal_os_state is also used to hold some registers that
//	need to be saved and restored across the stack switches.
//
//	Most input registers to this stub come from PAL/SAL
//	r1  os gp, physical
//	r8  pal_proc entry point
//	r9  sal_proc entry point
//	r10 sal gp
//	r11 MCA - rendevzous state, INIT - reason code
//	r12 sal return address
//	r17 pal min_state
//	r18 processor state parameter
//	r19 monarch flag, set by the caller of this routine
//
//	In addition to the SAL to OS state, this routine saves all the
//	registers that appear in struct pt_regs and struct switch_stack,
//	excluding those that are already in the PAL minstate area.  This
//	results in a partial pt_regs and switch_stack, the C code copies the
//	remaining registers from PAL minstate to pt_regs and switch_stack.  The
//	resulting structures contain all the state of the original process when
//	MCA/INIT occurred.
//
//	Returns via b0 (the address passed in r2).  Stores are interleaved
//	over two pointers (temp1/temp2, 16 bytes apart) so consecutive
//	8-byte slots are filled pairwise.
//
//--

ia64_state_save:
	add regs=MCA_SOS_OFFSET, r3
	add ms=MCA_SOS_OFFSET+8, r3
	mov b0=r2		// save return address
	cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3	// p1 = MCA, p2 = INIT
	;;
	GET_IA64_MCA_DATA(temp2)
	;;
	add temp1=temp2, regs	// struct ia64_sal_os_state on MCA or INIT stack
	add temp2=temp2, ms	// struct ia64_sal_os_state+8 on MCA or INIT stack
	;;
	mov regs=temp1		// save the start of sos
	st8 [temp1]=r1,16	// os_gp
	st8 [temp2]=r8,16	// pal_proc
	;;
	st8 [temp1]=r9,16	// sal_proc
	st8 [temp2]=r11,16	// rv_rc
	mov r11=cr.iipa
	;;
	st8 [temp1]=r18		// proc_state_param
	st8 [temp2]=r19		// monarch
	mov r6=IA64_KR(CURRENT)
	add temp1=SOS(SAL_RA), regs
	add temp2=SOS(SAL_GP), regs
	;;
	st8 [temp1]=r12,16	// sal_ra
	st8 [temp2]=r10,16	// sal_gp
	mov r12=cr.isr
	;;
	st8 [temp1]=r17,16	// pal_min_state
	st8 [temp2]=r6,16	// prev_IA64_KR_CURRENT
	mov r6=IA64_KR(CURRENT_STACK)
	;;
	st8 [temp1]=r6,16	// prev_IA64_KR_CURRENT_STACK
	st8 [temp2]=r0,16	// prev_task, starts off as NULL
	mov r6=cr.ifa
	;;
	st8 [temp1]=r12,16	// cr.isr
	st8 [temp2]=r6,16	// cr.ifa
	mov r12=cr.itir
	;;
	st8 [temp1]=r12,16	// cr.itir
	st8 [temp2]=r11,16	// cr.iipa
	mov r12=cr.iim
	;;
	st8 [temp1]=r12		// cr.iim
(p1)	mov r12=IA64_MCA_COLD_BOOT
(p2)	mov r12=IA64_INIT_WARM_BOOT
	mov r6=cr.iha
	add temp1=SOS(OS_STATUS), regs
	;;
	st8 [temp2]=r6		// cr.iha
	add temp2=SOS(CONTEXT), regs
	st8 [temp1]=r12		// os_status, default is cold boot
	mov r6=IA64_MCA_SAME_CONTEXT
	;;
	st8 [temp2]=r6		// context, default is same context

	// Save the pt_regs data that is not in minstate.  The previous code
	// left regs at sos.
	add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
	;;
	add temp1=PT(B6), regs
	mov temp3=b6
	mov temp4=b7
	add temp2=PT(B7), regs
	;;
	st8 [temp1]=temp3,PT(AR_CSD)-PT(B6)		// save b6
	st8 [temp2]=temp4,PT(AR_SSD)-PT(B7)		// save b7
	mov temp3=ar.csd
	mov temp4=ar.ssd
	cover				// must be last in group
	;;
	st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD)	// save ar.csd
	st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD)		// save ar.ssd
	mov temp3=ar.unat
	mov temp4=ar.pfs
	;;
	st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT)	// save ar.unat
	st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS)	// save ar.pfs
	mov temp3=ar.rnat
	mov temp4=ar.bspstore
	;;
	st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT)	// save ar.rnat
	st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE)	// save ar.bspstore
	mov temp3=ar.bsp
	;;
	sub temp3=temp3, temp4	// ar.bsp - ar.bspstore
	mov temp4=ar.fpsr
	;;
	shl temp3=temp3,16	// compute ar.rsc to be used for "loadrs"
	;;
	st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS)		// save loadrs
	st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR)		// save ar.fpsr
	mov temp3=ar.ccv
	;;
	st8 [temp1]=temp3,PT(F7)-PT(AR_CCV)		// save ar.ccv
	stf.spill [temp2]=f6,PT(F8)-PT(F6)
	;;
	stf.spill [temp1]=f7,PT(F9)-PT(F7)
	stf.spill [temp2]=f8,PT(F10)-PT(F8)
	;;
	stf.spill [temp1]=f9,PT(F11)-PT(F9)
	stf.spill [temp2]=f10
	;;
	stf.spill [temp1]=f11

	// Save the switch_stack data that is not in minstate nor pt_regs.  The
	// previous code left regs at pt_regs.
	add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
	;;
	add temp1=SW(F2), regs
	add temp2=SW(F3), regs
	;;
	stf.spill [temp1]=f2,32
	stf.spill [temp2]=f3,32
	;;
	stf.spill [temp1]=f4,32
	stf.spill [temp2]=f5,32
	;;
	stf.spill [temp1]=f12,32
	stf.spill [temp2]=f13,32
	;;
	stf.spill [temp1]=f14,32
	stf.spill [temp2]=f15,32
	;;
	stf.spill [temp1]=f16,32
	stf.spill [temp2]=f17,32
	;;
	stf.spill [temp1]=f18,32
	stf.spill [temp2]=f19,32
	;;
	stf.spill [temp1]=f20,32
	stf.spill [temp2]=f21,32
	;;
	stf.spill [temp1]=f22,32
	stf.spill [temp2]=f23,32
	;;
	stf.spill [temp1]=f24,32
	stf.spill [temp2]=f25,32
	;;
	stf.spill [temp1]=f26,32
	stf.spill [temp2]=f27,32
	;;
	stf.spill [temp1]=f28,32
	stf.spill [temp2]=f29,32
	;;
	stf.spill [temp1]=f30,SW(B2)-SW(F30)
	stf.spill [temp2]=f31,SW(B3)-SW(F31)
	mov temp3=b2
	mov temp4=b3
	;;
	st8 [temp1]=temp3,16	// save b2
	st8 [temp2]=temp4,16	// save b3
	mov temp3=b4
	mov temp4=b5
	;;
	st8 [temp1]=temp3,SW(AR_LC)-SW(B4)	// save b4
	st8 [temp2]=temp4	// save b5
	mov temp3=ar.lc
	;;
	st8 [temp1]=temp3	// save ar.lc

	// FIXME: Some proms are incorrectly accessing the minstate area as
	// cached data.  The C code uses region 6, uncached virtual.  Ensure
	// that there is no cache data lying around for the first 1K of the
	// minstate area.
	// Remove this code in September 2006, that gives platforms a year to
	// fix their proms and get their customers updated.

	// Flush-cache (fc) the first 1K of the minstate area (r17), 32 bytes
	// per fc, unrolled 8 wide: 4 groups of 8 fc = 4*8*32 = 1024 bytes.
	add r1=32*1,r17
	add r2=32*2,r17
	add r3=32*3,r17
	add r4=32*4,r17
	add r5=32*5,r17
	add r6=32*6,r17
	add r7=32*7,r17
	;;
	fc r17
	fc r1
	fc r2
	fc r3
	fc r4
	fc r5
	fc r6
	fc r7
	add r17=32*8,r17
	add r1=32*8,r1
	add r2=32*8,r2
	add r3=32*8,r3
	add r4=32*8,r4
	add r5=32*8,r5
	add r6=32*8,r6
	add r7=32*8,r7
	;;
	fc r17
	fc r1
	fc r2
	fc r3
	fc r4
	fc r5
	fc r6
	fc r7
	add r17=32*8,r17
	add r1=32*8,r1
	add r2=32*8,r2
	add r3=32*8,r3
	add r4=32*8,r4
	add r5=32*8,r5
	add r6=32*8,r6
	add r7=32*8,r7
	;;
	fc r17
	fc r1
	fc r2
	fc r3
	fc r4
	fc r5
	fc r6
	fc r7
	add r17=32*8,r17
	add r1=32*8,r1
	add r2=32*8,r2
	add r3=32*8,r3
	add r4=32*8,r4
	add r5=32*8,r5
	add r6=32*8,r6
	add r7=32*8,r7
	;;
	fc r17
	fc r1
	fc r2
	fc r3
	fc r4
	fc r5
	fc r6
	fc r7

	br.sptk b0

//EndStub//////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
|
|
|
//++
|
|
|
|
// Name:
|
2005-09-11 14:22:53 +07:00
|
|
|
// ia64_state_restore()
|
2005-04-17 05:20:36 +07:00
|
|
|
//
|
|
|
|
// Stub Description:
|
|
|
|
//
|
2005-09-11 14:22:53 +07:00
|
|
|
// Restore the SAL/OS state. This is sensitive to the layout of struct
|
|
|
|
// ia64_sal_os_state in mca.h.
|
|
|
|
//
|
|
|
|
// r2 contains the return address, r3 contains either
|
|
|
|
// IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
|
|
|
|
//
|
|
|
|
// In addition to the SAL to OS state, this routine restores all the
|
|
|
|
// registers that appear in struct pt_regs and struct switch_stack,
|
|
|
|
// excluding those in the PAL minstate area.
|
2005-04-17 05:20:36 +07:00
|
|
|
//
|
|
|
|
//--
|
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
ia64_state_restore:
|
|
|
|
// Restore the switch_stack data that is not in minstate nor pt_regs.
|
|
|
|
add regs=MCA_SWITCH_STACK_OFFSET, r3
|
|
|
|
mov b0=r2 // save return address
|
|
|
|
;;
|
|
|
|
GET_IA64_MCA_DATA(temp2)
|
|
|
|
;;
|
|
|
|
add regs=temp2, regs
|
|
|
|
;;
|
|
|
|
add temp1=SW(F2), regs
|
|
|
|
add temp2=SW(F3), regs
|
|
|
|
;;
|
|
|
|
ldf.fill f2=[temp1],32
|
|
|
|
ldf.fill f3=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f4=[temp1],32
|
|
|
|
ldf.fill f5=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f12=[temp1],32
|
|
|
|
ldf.fill f13=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f14=[temp1],32
|
|
|
|
ldf.fill f15=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f16=[temp1],32
|
|
|
|
ldf.fill f17=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f18=[temp1],32
|
|
|
|
ldf.fill f19=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f20=[temp1],32
|
|
|
|
ldf.fill f21=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f22=[temp1],32
|
|
|
|
ldf.fill f23=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f24=[temp1],32
|
|
|
|
ldf.fill f25=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f26=[temp1],32
|
|
|
|
ldf.fill f27=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f28=[temp1],32
|
|
|
|
ldf.fill f29=[temp2],32
|
|
|
|
;;
|
|
|
|
ldf.fill f30=[temp1],SW(B2)-SW(F30)
|
|
|
|
ldf.fill f31=[temp2],SW(B3)-SW(F31)
|
|
|
|
;;
|
|
|
|
ld8 temp3=[temp1],16 // restore b2
|
|
|
|
ld8 temp4=[temp2],16 // restore b3
|
|
|
|
;;
|
|
|
|
mov b2=temp3
|
|
|
|
mov b3=temp4
|
|
|
|
ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4
|
|
|
|
ld8 temp4=[temp2] // restore b5
|
|
|
|
;;
|
|
|
|
mov b4=temp3
|
|
|
|
mov b5=temp4
|
|
|
|
ld8 temp3=[temp1] // restore ar.lc
|
|
|
|
;;
|
|
|
|
mov ar.lc=temp3
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
// Restore the pt_regs data that is not in minstate. The previous code
|
|
|
|
// left regs at switch_stack.
|
|
|
|
add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
|
|
|
|
;;
|
|
|
|
add temp1=PT(B6), regs
|
|
|
|
add temp2=PT(B7), regs
|
|
|
|
;;
|
|
|
|
ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6
|
|
|
|
ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7
|
|
|
|
;;
|
|
|
|
mov b6=temp3
|
|
|
|
mov b7=temp4
|
|
|
|
ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd
|
|
|
|
ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd
|
|
|
|
;;
|
|
|
|
mov ar.csd=temp3
|
|
|
|
mov ar.ssd=temp4
|
|
|
|
ld8 temp3=[temp1] // restore ar.unat
|
|
|
|
add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
|
|
|
|
ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs
|
|
|
|
;;
|
|
|
|
mov ar.unat=temp3
|
|
|
|
mov ar.pfs=temp4
|
|
|
|
// ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack.
|
|
|
|
ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv
|
|
|
|
ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr
|
|
|
|
;;
|
|
|
|
mov ar.ccv=temp3
|
|
|
|
mov ar.fpsr=temp4
|
|
|
|
ldf.fill f6=[temp1],PT(F8)-PT(F6)
|
|
|
|
ldf.fill f7=[temp2],PT(F9)-PT(F7)
|
|
|
|
;;
|
|
|
|
ldf.fill f8=[temp1],PT(F10)-PT(F8)
|
|
|
|
ldf.fill f9=[temp2],PT(F11)-PT(F9)
|
|
|
|
;;
|
|
|
|
ldf.fill f10=[temp1]
|
|
|
|
ldf.fill f11=[temp2]
|
|
|
|
|
|
|
|
// Restore the SAL to OS state. The previous code left regs at pt_regs.
|
|
|
|
add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
|
2005-04-17 05:20:36 +07:00
|
|
|
;;
|
2006-04-07 15:08:11 +07:00
|
|
|
add temp1=SOS(SAL_RA), regs
|
|
|
|
add temp2=SOS(SAL_GP), regs
|
2005-09-11 14:22:53 +07:00
|
|
|
;;
|
|
|
|
ld8 r12=[temp1],16 // sal_ra
|
|
|
|
ld8 r9=[temp2],16 // sal_gp
|
|
|
|
;;
|
2005-09-22 15:49:15 +07:00
|
|
|
ld8 r22=[temp1],16 // pal_min_state, virtual
|
2006-04-07 13:34:34 +07:00
|
|
|
ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT
|
2005-09-11 14:22:53 +07:00
|
|
|
;;
|
2005-09-22 15:49:15 +07:00
|
|
|
ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
|
|
|
|
ld8 r20=[temp2],16 // prev_task
|
|
|
|
;;
|
2005-09-11 14:22:53 +07:00
|
|
|
ld8 temp3=[temp1],16 // cr.isr
|
|
|
|
ld8 temp4=[temp2],16 // cr.ifa
|
|
|
|
;;
|
|
|
|
mov cr.isr=temp3
|
|
|
|
mov cr.ifa=temp4
|
|
|
|
ld8 temp3=[temp1],16 // cr.itir
|
|
|
|
ld8 temp4=[temp2],16 // cr.iipa
|
|
|
|
;;
|
|
|
|
mov cr.itir=temp3
|
|
|
|
mov cr.iipa=temp4
|
2006-04-07 15:08:11 +07:00
|
|
|
ld8 temp3=[temp1] // cr.iim
|
|
|
|
ld8 temp4=[temp2] // cr.iha
|
|
|
|
add temp1=SOS(OS_STATUS), regs
|
|
|
|
add temp2=SOS(CONTEXT), regs
|
2005-09-11 14:22:53 +07:00
|
|
|
;;
|
|
|
|
mov cr.iim=temp3
|
|
|
|
mov cr.iha=temp4
|
2006-01-14 05:01:01 +07:00
|
|
|
dep r22=0,r22,62,1 // pal_min_state, physical, uncached
|
2006-04-07 13:34:34 +07:00
|
|
|
mov IA64_KR(CURRENT)=r13
|
2005-09-11 14:22:53 +07:00
|
|
|
ld8 r8=[temp1] // os_status
|
|
|
|
ld8 r10=[temp2] // context
|
|
|
|
|
2005-09-22 15:49:15 +07:00
|
|
|
/* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To
|
|
|
|
* avoid any dependencies on the algorithm in ia64_switch_to(), just
|
|
|
|
* purge any existing CURRENT_STACK mapping and insert the new one.
|
|
|
|
*
|
2006-04-07 13:34:34 +07:00
|
|
|
* r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
|
2005-09-22 15:49:15 +07:00
|
|
|
* prev_IA64_KR_CURRENT, these values may have been changed by the C
|
|
|
|
* code. Do not use r8, r9, r10, r22, they contain values ready for
|
|
|
|
* the return to SAL.
|
|
|
|
*/
|
|
|
|
|
|
|
|
mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
|
|
|
|
;;
|
|
|
|
shl r15=r15,IA64_GRANULE_SHIFT
|
|
|
|
;;
|
|
|
|
dep r15=-1,r15,61,3 // virtual granule
|
|
|
|
mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
|
|
|
|
;;
|
|
|
|
ptr.d r15,r18
|
|
|
|
;;
|
|
|
|
srlz.d
|
|
|
|
|
2006-04-07 13:34:34 +07:00
|
|
|
extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT
|
2005-09-22 15:49:15 +07:00
|
|
|
shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
|
|
|
|
movl r21=PAGE_KERNEL // page properties
|
|
|
|
;;
|
|
|
|
mov IA64_KR(CURRENT_STACK)=r16
|
|
|
|
cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region?
|
|
|
|
or r21=r20,r21 // construct PA | page properties
|
|
|
|
(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
|
|
|
|
;;
|
|
|
|
mov cr.itir=r18
|
2006-04-07 13:34:34 +07:00
|
|
|
mov cr.ifa=r13
|
2005-09-22 15:49:15 +07:00
|
|
|
mov r20=IA64_TR_CURRENT_STACK
|
|
|
|
;;
|
|
|
|
itr.d dtr[r20]=r21
|
|
|
|
;;
|
|
|
|
srlz.d
|
|
|
|
1:
|
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
br.sptk b0
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
//EndStub//////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
//++
// Name:
//	ia64_new_stack()
//
// Stub Description:
//
//	Switch to the MCA/INIT stack.
//
//	r2 contains the return address, r3 contains either
//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
//
//	On entry RBS is still on the original stack, this routine switches RBS
//	to use the MCA/INIT stack.
//
//	On entry, sos->pal_min_state is physical, on exit it is virtual.
//
//	Clobbers: b0, ms, regs, temp1-temp4 (register aliases defined earlier
//	in this file).
//--

ia64_new_stack:
	add	regs=MCA_PT_REGS_OFFSET, r3
	add	temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
	mov	b0=r2			// save return address
	GET_IA64_MCA_DATA(temp1)	// temp1 = per cpu MCA save area (physical)
	invala				// invalidate ALAT; old advanced loads are stale
	;;
	add	temp2=temp2, temp1	// struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
	add	regs=regs, temp1	// struct pt_regs on MCA or INIT stack
	;;
	// Address of minstate area provided by PAL is physical, uncacheable.
	// Convert to Linux virtual address in region 6 for C code.
	ld8	ms=[temp2]		// pal_min_state, physical
	;;
	dep	temp1=-1,ms,62,2	// set region 6 (force top two bits to 1)
	mov	temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
	;;
	st8	[temp2]=temp1		// pal_min_state, virtual

	add	temp4=temp3, regs	// start of bspstore on new stack
	;;
	mov	ar.bspstore=temp4	// switch RBS to MCA/INIT stack
	;;
	flushrs				// must be first in group
	br.sptk	b0			// return to caller (saved in b0 above)

//EndStub//////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
|
|
|
//++
// Name:
//	ia64_old_stack()
//
// Stub Description:
//
//	Switch to the old stack.
//
//	r2 contains the return address, r3 contains either
//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
//
//	On entry, pal_min_state is virtual, on exit it is physical.
//
//	On entry RBS is on the MCA/INIT stack, this routine switches RBS
//	back to the previous stack.
//
//	The psr is set to all zeroes.  SAL return requires either all zeroes or
//	just psr.mc set.  Leaving psr.mc off allows INIT to be issued if this
//	code does not perform correctly.
//
//	The dirty registers at the time of the event were flushed to the
//	MCA/INIT stack in ia64_pt_regs_save().  Restore the dirty registers
//	before reverting to the previous bspstore.
//--

ia64_old_stack:
	add	regs=MCA_PT_REGS_OFFSET, r3
	mov	b0=r2			// save return address
	GET_IA64_MCA_DATA(temp2)	// temp2 = per cpu MCA save area (physical)
	LOAD_PHYSICAL(p0,temp1,1f)	// temp1 = physical address of label 1 below
	;;
	// rfi with zeroed ipsr/ifs drops to physical mode at label 1.
	mov	cr.ipsr=r0
	mov	cr.ifs=r0
	mov	cr.iip=temp1
	;;
	invala				// invalidate ALAT before mode change
	rfi
1:

	add	regs=regs, temp2	// struct pt_regs on MCA or INIT stack
	;;
	add	temp1=PT(LOADRS), regs
	;;
	ld8	temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS)	// restore loadrs
	;;
	ld8	temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE)	// restore ar.bspstore
	mov	ar.rsc=temp2		// saved loadrs value feeds the loadrs insn below
	;;
	loadrs				// pull dirty registers back from the MCA/INIT RBS
	ld8	temp4=[temp1]		// restore ar.rnat
	;;
	mov	ar.bspstore=temp3	// back to old stack
	;;
	mov	ar.rnat=temp4
	;;

	br.sptk	b0			// return to caller (saved in b0 above)

//EndStub//////////////////////////////////////////////////////////////////////
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
//++
// Name:
//	ia64_set_kernel_registers()
//
// Stub Description:
//
//	Set the registers that are required by the C code in order to run on an
//	MCA/INIT stack.
//
//	r2 contains the return address, r3 contains either
//	IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
//
//	On exit: r12 = kernel sp (virtual), r13 = current (virtual),
//	IA64_KR(CURRENT)/IA64_KR(CURRENT_STACK) and IA64_TR_CURRENT_STACK
//	refer to the MCA/INIT stack, and ar.fpsr is the kernel default.
//--

ia64_set_kernel_registers:
	add	temp3=MCA_SP_OFFSET, r3
	mov	b0=r2		// save return address
	GET_IA64_MCA_DATA(temp1)	// temp1 = per cpu MCA save area (physical)
	;;
	add	r12=temp1, temp3	// kernel stack pointer on MCA/INIT stack
	add	r13=temp1, r3		// set current to start of MCA/INIT stack
	add	r20=temp1, r3		// physical start of MCA/INIT stack
	;;
	DATA_PA_TO_VA(r12,temp2)	// convert sp to virtual
	DATA_PA_TO_VA(r13,temp3)	// convert current to virtual
	;;
	mov	IA64_KR(CURRENT)=r13	// set current to virtual address

	/* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
	 * any dependencies on the algorithm in ia64_switch_to(), just purge
	 * any existing CURRENT_STACK mapping and insert the new one.
	 */

	mov	r16=IA64_KR(CURRENT_STACK)	// physical granule mapped by IA64_TR_CURRENT_STACK
	;;
	shl	r16=r16,IA64_GRANULE_SHIFT	// granule number -> physical address
	;;
	dep	r16=-1,r16,61,3			// virtual granule
	mov	r18=IA64_GRANULE_SHIFT<<2	// for cr.itir.ps
	;;
	ptr.d	r16,r18				// purge old CURRENT_STACK translation
	;;
	srlz.d					// serialize the purge

	shr.u	r16=r20,IA64_GRANULE_SHIFT	// r20 = physical start of MCA/INIT stack
	movl	r21=PAGE_KERNEL			// page properties
	;;
	mov	IA64_KR(CURRENT_STACK)=r16	// record new granule number
	or	r21=r20,r21			// construct PA | page properties
	;;
	mov	cr.itir=r18			// insert size = one granule
	mov	cr.ifa=r13			// insert at the new current address
	mov	r20=IA64_TR_CURRENT_STACK

	movl	r17=FPSR_DEFAULT
	;;
	mov.m	ar.fpsr=r17			// set ar.fpsr to kernel default value
	;;
	itr.d	dtr[r20]=r21			// wire the new translation
	;;
	srlz.d					// serialize the insert

	br.sptk	b0				// return to caller (saved in b0 above)

//EndStub//////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
#undef ms
|
|
|
|
#undef regs
|
|
|
|
#undef temp1
|
|
|
|
#undef temp2
|
|
|
|
#undef temp3
|
|
|
|
#undef temp4
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-09-11 14:22:53 +07:00
|
|
|
// Support function for mca.c, it is here to avoid using inline asm.  Given the
// address of an rnat slot, if that address is below the current ar.bspstore
// then return the contents of that slot, otherwise return the contents of
// ar.rnat.
//
// In:  in0 = address of an rnat slot
// Out: r8  = rnat value for that slot
GLOBAL_ENTRY(ia64_get_rnat)
	alloc	r14=ar.pfs,1,0,0,0	// 1 input, no locals/outputs
	mov	ar.rsc=0		// stop the RSE so bspstore/rnat are stable
	;;
	mov	r14=ar.bspstore
	;;
	cmp.lt	p6,p7=in0,r14		// slot already flushed to memory?
	;;
(p6)	ld8	r8=[in0]		// yes: read the in-memory rnat slot
(p7)	mov	r8=ar.rnat		// no: value is still in ar.rnat
	mov	ar.rsc=3		// restart the RSE (eager mode)
	br.ret.sptk.many rp
END(ia64_get_rnat)
|
[IA64] kdump: Mask MCA/INIT on frozen cpus
Summary:
INIT asserted on kdump kernel invokes INIT handler not only on a
cpu that running on the kdump kernel, but also BSP of the panicked
kernel, because the (badly) frozen BSP can be thawed by INIT.
Description:
The kdump_cpu_freeze() is called on every cpu except the one that
initiates the panic and/or kdump, to stop/offline the cpu (on ia64,
this means we either pass control of the cpu to SAL or put it in a
spinloop). Note that CPU0 (the BSP) always goes to the spinloop, so
if a panic happened on an AP, there are at least 2 cpus (the AP and
the BSP) which do not go back to SAL.
On the spinning cpus, interrupts are disabled (rsm psr.i), but INIT
can still interrupt them because psr.mc, which would mask it, is not
set unless kdump_cpu_freeze() is called from MCA/INIT context.
Therefore, suppose that a panic happened on an AP, kdump was
invoked, new INIT handlers for the kdump kernel were registered, and
then an INIT is asserted. From the viewpoint of SAL, there are 2
online cpus, so the INIT will be delivered to both of them. This
means that not only the AP (the cpu executing kdump) enters the
newly registered INIT handler, but also the BSP (the other cpu,
spinning in the panicked kernel) enters the same INIT handler. Of
course the register settings on the BSP are still the old ones (for
the panicked kernel), so running the handler with those wrong
settings will behave in an extremely unexpected way.
I believe this is not desirable behavior.
How to Reproduce:
Start kdump on one of APs (e.g. cpu1)
# taskset 0x2 echo c > /proc/sysrq-trigger
Then assert INIT after kdump kernel is booted, after new INIT handler
for kdump kernel is registered.
Expected results:
An INIT handler is invoked only on the AP.
Actual results:
An INIT handler is invoked on the AP and BSP.
Sample of results:
I got the following console log by asserting INIT after the prompt
"root:/>". It seems that two monarchs appeared from one INIT, and
one of them panicked in the end. It also seems that the panicked one
assumed there were 4 online cpus and that none of them did
rendezvous:
:
[ 0 %]dropping to initramfs shell
exiting this shell will reboot your system
root:/> Entered OS INIT handler. PSP=fff301a0 cpu=0 monarch=0
ia64_init_handler: Promoting cpu 0 to monarch.
Delaying for 5 seconds...
All OS INIT slaves have reached rendezvous
Processes interrupted by INIT - 0 (cpu 0 task 0xa000000100af0000)
:
<<snip>>
:
Entered OS INIT handler. PSP=fff301a0 cpu=0 monarch=1
Delaying for 5 seconds...
mlogbuf_finish: printing switched to urgent mode, MCA/INIT might be dodgy or fail.
OS INIT slave did not rendezvous on cpu 1 2 3
INIT swapper 0[0]: bugcheck! 0 [1]
:
<<snip>>
:
Kernel panic - not syncing: Attempted to kill the idle task!
Proposed fix:
To avoid this problem, this patch inserts ia64_set_psr_mc() to mask
INIT on cpus that are about to be frozen. This masking has no effect
if kdump_cpu_freeze() is called from the INIT handler when
kdump_on_init == 1, because psr.mc is already set to 1 before
entering OS_INIT.
I confirmed that the weird log shown above disappears after applying
this patch.
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Haren Myneni <hbabu@us.ibm.com>
Cc: kexec@lists.infradead.org
Acked-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
2009-08-07 04:51:56 +07:00
|
|
|
|
|
|
|
|
|
|
|
// void ia64_set_psr_mc(void)
//
// Set psr.mc bit to mask MCA/INIT.
//
// The psr cannot be written directly, so the new value is built in r14
// and installed via cr.ipsr + rfi to the local label 1 below.
// Clobbers: r14, r15.
GLOBAL_ENTRY(ia64_set_psr_mc)
	rsm	psr.i | psr.ic		// disable interrupts
	;;
	srlz.d				// serialize the psr change
	;;
	mov	r14 = psr		// get psr{36:35,31:0}
	movl	r15 = 1f		// resume address for the rfi
	;;
	dep	r14 = -1, r14, PSR_MC, 1	// set psr.mc
	;;
	dep	r14 = -1, r14, PSR_IC, 1	// set psr.ic
	;;
	dep	r14 = -1, r14, PSR_BN, 1	// keep bank1 in use
	;;
	mov	cr.ipsr = r14		// new psr takes effect at rfi
	mov	cr.ifs = r0
	mov	cr.iip = r15
	;;
	rfi
1:
	br.ret.sptk.many rp
END(ia64_set_psr_mc)
|