/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */
|
|
|
|
|
2015-05-02 07:13:42 +07:00
|
|
|
#include <linux/init.h>
|
|
|
|
|
2012-03-10 23:30:31 +07:00
|
|
|
#include <asm/assembler.h>
|
2005-10-30 03:44:55 +07:00
|
|
|
#include <asm/memory.h>
|
2011-02-06 22:32:24 +07:00
|
|
|
#include <asm/glue-df.h>
|
|
|
|
#include <asm/glue-pf.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/vfpmacros.h>
|
2012-02-09 07:26:34 +07:00
|
|
|
#ifndef CONFIG_MULTI_IRQ_HANDLER
|
2008-08-05 22:14:15 +07:00
|
|
|
#include <mach/entry-macro.S>
|
2012-02-09 07:26:34 +07:00
|
|
|
#endif
|
2006-06-21 19:31:52 +07:00
|
|
|
#include <asm/thread_notify.h>
|
2009-02-16 17:42:09 +07:00
|
|
|
#include <asm/unwind.h>
|
2009-11-10 06:53:29 +07:00
|
|
|
#include <asm/unistd.h>
|
2010-07-05 20:53:10 +07:00
|
|
|
#include <asm/tls.h>
|
2012-03-29 00:30:01 +07:00
|
|
|
#include <asm/system_info.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#include "entry-header.S"
|
2010-12-22 19:20:08 +07:00
|
|
|
#include <asm/entry-macro-multi.S>
|
2015-01-05 18:29:25 +07:00
|
|
|
#include <asm/probes.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-05-22 00:14:44 +07:00
|
|
|
/*
 * Interrupt handling.
 */
|
|
|
|
.macro irq_handler
|
2010-12-13 15:42:34 +07:00
|
|
|
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
2011-06-26 16:34:02 +07:00
|
|
|
ldr r1, =handle_arch_irq
|
2010-12-13 15:42:34 +07:00
|
|
|
mov r0, sp
|
2015-04-21 20:17:25 +07:00
|
|
|
badr lr, 9997f
|
2011-09-06 15:23:26 +07:00
|
|
|
ldr pc, [r1]
|
|
|
|
#else
|
2010-12-22 19:20:08 +07:00
|
|
|
arch_irq_handler_default
|
2011-09-06 15:23:26 +07:00
|
|
|
#endif
|
2010-09-04 16:47:48 +07:00
|
|
|
9997:
|
2005-05-22 00:14:44 +07:00
|
|
|
.endm
|
|
|
|
|
2011-06-26 16:22:08 +07:00
|
|
|
.macro pabt_helper
|
2011-06-26 18:37:35 +07:00
|
|
|
@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
|
2011-06-26 16:22:08 +07:00
|
|
|
#ifdef MULTI_PABORT
|
2011-06-25 21:46:08 +07:00
|
|
|
ldr ip, .LCprocfns
|
2011-06-26 16:22:08 +07:00
|
|
|
mov lr, pc
|
2011-06-25 21:46:08 +07:00
|
|
|
ldr pc, [ip, #PROCESSOR_PABT_FUNC]
|
2011-06-26 16:22:08 +07:00
|
|
|
#else
|
|
|
|
bl CPU_PABORT_HANDLER
|
|
|
|
#endif
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro dabt_helper
|
|
|
|
|
|
|
|
@
|
|
|
|
@ Call the processor-specific abort handler:
|
|
|
|
@
|
2011-06-26 22:01:26 +07:00
|
|
|
@ r2 - pt_regs
|
2011-06-26 20:35:07 +07:00
|
|
|
@ r4 - aborted context pc
|
|
|
|
@ r5 - aborted context psr
|
2011-06-26 16:22:08 +07:00
|
|
|
@
|
|
|
|
@ The abort handler must return the aborted address in r0, and
|
|
|
|
@ the fault status register in r1. r9 must be preserved.
|
|
|
|
@
|
|
|
|
#ifdef MULTI_DABORT
|
2011-06-25 21:46:08 +07:00
|
|
|
ldr ip, .LCprocfns
|
2011-06-26 16:22:08 +07:00
|
|
|
mov lr, pc
|
2011-06-25 21:46:08 +07:00
|
|
|
ldr pc, [ip, #PROCESSOR_DABT_FUNC]
|
2011-06-26 16:22:08 +07:00
|
|
|
#else
|
|
|
|
bl CPU_DABORT_HANDLER
|
|
|
|
#endif
|
|
|
|
.endm
|
|
|
|
|
2017-11-25 06:54:22 +07:00
|
|
|
.section .entry.text,"ax",%progbits
|
2007-12-04 03:27:56 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * Invalid mode handlers
 */
|
2005-06-01 04:22:32 +07:00
|
|
|
.macro inv_entry, reason
|
2016-05-10 22:34:27 +07:00
|
|
|
sub sp, sp, #PT_REGS_SIZE
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( stmib sp, {r1 - lr} )
|
|
|
|
THUMB( stmia sp, {r0 - r12} )
|
|
|
|
THUMB( str sp, [sp, #S_SP] )
|
|
|
|
THUMB( str lr, [sp, #S_LR] )
|
2005-04-17 05:20:36 +07:00
|
|
|
mov r1, #\reason
|
|
|
|
.endm
|
|
|
|
|
|
|
|
__pabt_invalid:
|
2005-06-01 04:22:32 +07:00
|
|
|
inv_entry BAD_PREFETCH
|
|
|
|
b common_invalid
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__pabt_invalid)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
__dabt_invalid:
|
2005-06-01 04:22:32 +07:00
|
|
|
inv_entry BAD_DATA
|
|
|
|
b common_invalid
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__dabt_invalid)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
__irq_invalid:
|
2005-06-01 04:22:32 +07:00
|
|
|
inv_entry BAD_IRQ
|
|
|
|
b common_invalid
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__irq_invalid)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
__und_invalid:
|
2005-06-01 04:22:32 +07:00
|
|
|
inv_entry BAD_UNDEFINSTR
|
|
|
|
|
|
|
|
@
|
|
|
|
@ XXX fall through to common_invalid
|
|
|
|
@
|
|
|
|
|
|
|
|
@
|
|
|
|
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
|
|
|
|
@
|
|
|
|
common_invalid:
|
|
|
|
zero_fp
|
|
|
|
|
|
|
|
ldmia r0, {r4 - r6}
|
|
|
|
add r0, sp, #S_PC @ here for interlock avoidance
|
|
|
|
mov r7, #-1 @ "" "" "" ""
|
|
|
|
str r4, [sp] @ save preserved r0
|
|
|
|
stmia r0, {r5 - r7} @ lr_<exception>,
|
|
|
|
@ cpsr_<exception>, "old_r0"
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
mov r0, sp
|
|
|
|
b bad_mode
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__und_invalid)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
 * SVC mode handlers
 */
|
2006-01-14 23:18:08 +07:00
|
|
|
|
|
|
|
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
|
|
|
|
#define SPFIX(code...) code
|
|
|
|
#else
|
|
|
|
#define SPFIX(code...)
|
|
|
|
#endif
|
|
|
|
|
2015-08-20 16:32:02 +07:00
|
|
|
.macro svc_entry, stack_hole=0, trace=1, uaccess=1
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnstart )
|
|
|
|
UNWIND(.save {r0 - pc} )
|
2016-05-13 16:22:38 +07:00
|
|
|
sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
|
2009-07-24 18:32:54 +07:00
|
|
|
#ifdef CONFIG_THUMB2_KERNEL
|
|
|
|
SPFIX( str r0, [sp] ) @ temporarily saved
|
|
|
|
SPFIX( mov r0, sp )
|
|
|
|
SPFIX( tst r0, #4 ) @ test original stack alignment
|
|
|
|
SPFIX( ldr r0, [sp] ) @ restored
|
|
|
|
#else
|
2006-01-14 23:18:08 +07:00
|
|
|
SPFIX( tst sp, #4 )
|
2009-07-24 18:32:54 +07:00
|
|
|
#endif
|
|
|
|
SPFIX( subeq sp, sp, #4 )
|
|
|
|
stmia sp, {r1 - r12}
|
2005-06-01 04:22:32 +07:00
|
|
|
|
2011-06-25 21:44:20 +07:00
|
|
|
ldmia r0, {r3 - r5}
|
|
|
|
add r7, sp, #S_SP - 4 @ here for interlock avoidance
|
|
|
|
mov r6, #-1 @ "" "" "" ""
|
2016-05-13 16:22:38 +07:00
|
|
|
add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
|
2011-06-25 21:44:20 +07:00
|
|
|
SPFIX( addeq r2, r2, #4 )
|
|
|
|
str r3, [sp, #-4]! @ save the "real" r0 copied
|
2005-06-01 04:22:32 +07:00
|
|
|
@ from the exception stack
|
|
|
|
|
2011-06-25 21:44:20 +07:00
|
|
|
mov r3, lr
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
@
|
|
|
|
@ We are now ready to fill in the remaining blanks on the stack:
|
|
|
|
@
|
2011-06-25 21:44:20 +07:00
|
|
|
@ r2 - sp_svc
|
|
|
|
@ r3 - lr_svc
|
|
|
|
@ r4 - lr_<exception>, already fixed up for correct return/restart
|
|
|
|
@ r5 - spsr_<exception>
|
|
|
|
@ r6 - orig_r0 (see pt_regs definition in ptrace.h)
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
2011-06-25 21:44:20 +07:00
|
|
|
stmia r7, {r2 - r6}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2016-05-13 17:40:20 +07:00
|
|
|
get_thread_info tsk
|
|
|
|
ldr r0, [tsk, #TI_ADDR_LIMIT]
|
|
|
|
mov r1, #TASK_SIZE
|
|
|
|
str r1, [tsk, #TI_ADDR_LIMIT]
|
|
|
|
str r0, [sp, #SVC_ADDR_LIMIT]
|
|
|
|
|
2015-08-20 16:32:02 +07:00
|
|
|
uaccess_save r0
|
|
|
|
.if \uaccess
|
|
|
|
uaccess_disable r0
|
|
|
|
.endif
|
|
|
|
|
2014-09-17 23:12:06 +07:00
|
|
|
.if \trace
|
2011-06-25 17:44:06 +07:00
|
|
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
|
|
|
bl trace_hardirqs_off
|
|
|
|
#endif
|
2014-09-17 23:12:06 +07:00
|
|
|
.endif
|
2011-06-25 23:35:19 +07:00
|
|
|
.endm
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2011-06-25 23:35:19 +07:00
|
|
|
.align 5
|
|
|
|
__dabt_svc:
|
2015-08-20 16:32:02 +07:00
|
|
|
svc_entry uaccess=0
|
2005-04-17 05:20:36 +07:00
|
|
|
mov r2, sp
|
2011-06-26 22:01:26 +07:00
|
|
|
dabt_helper
|
2013-11-04 17:42:29 +07:00
|
|
|
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
|
2011-06-25 21:44:20 +07:00
|
|
|
svc_exit r5 @ return from exception
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__dabt_svc)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.align 5
|
|
|
|
__irq_svc:
|
2005-06-01 04:22:32 +07:00
|
|
|
svc_entry
|
2005-05-22 00:14:44 +07:00
|
|
|
irq_handler
|
2011-06-25 16:57:57 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#ifdef CONFIG_PREEMPT
|
2011-06-25 16:57:57 +07:00
|
|
|
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
|
2005-05-22 00:15:45 +07:00
|
|
|
ldr r0, [tsk, #TI_FLAGS] @ get flags
|
2008-04-13 23:47:35 +07:00
|
|
|
teq r8, #0 @ if preempt count != 0
|
|
|
|
movne r0, #0 @ force flags to 0
|
2005-04-17 05:20:36 +07:00
|
|
|
tst r0, #_TIF_NEED_RESCHED
|
|
|
|
blne svc_preempt
|
|
|
|
#endif
|
2011-06-26 18:47:08 +07:00
|
|
|
|
2013-03-28 19:57:40 +07:00
|
|
|
svc_exit r5, irq = 1 @ return from exception
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__irq_svc)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.ltorg
|
|
|
|
|
|
|
|
#ifdef CONFIG_PREEMPT
|
|
|
|
svc_preempt:
|
2008-04-13 23:47:35 +07:00
|
|
|
mov r8, lr
|
2005-04-17 05:20:36 +07:00
|
|
|
1: bl preempt_schedule_irq @ irq en/disable is done inside
|
2005-05-22 00:15:45 +07:00
|
|
|
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
|
2005-04-17 05:20:36 +07:00
|
|
|
tst r0, #_TIF_NEED_RESCHED
|
2014-06-30 22:29:12 +07:00
|
|
|
reteq r8 @ go again
|
2005-04-17 05:20:36 +07:00
|
|
|
b 1b
|
|
|
|
#endif
|
|
|
|
|
2012-07-31 01:42:10 +07:00
|
|
|
__und_fault:
|
|
|
|
@ Correct the PC such that it is pointing at the instruction
|
|
|
|
@ which caused the fault. If the faulting instruction was ARM
|
|
|
|
@ the PC will be pointing at the next instruction, and have to
|
|
|
|
@ subtract 4. Otherwise, it is Thumb, and the PC will be
|
|
|
|
@ pointing at the second half of the Thumb instruction. We
|
|
|
|
@ have to subtract 2.
|
|
|
|
ldr r2, [r0, #S_PC]
|
|
|
|
sub r2, r2, r1
|
|
|
|
str r2, [r0, #S_PC]
|
|
|
|
b do_undefinstr
|
|
|
|
ENDPROC(__und_fault)
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
.align 5
|
|
|
|
__und_svc:
|
2007-12-15 03:56:01 +07:00
|
|
|
#ifdef CONFIG_KPROBES
|
|
|
|
@ If a kprobe is about to simulate a "stmdb sp..." instruction,
|
|
|
|
@ it obviously needs free stack space which then will belong to
|
|
|
|
@ the saved context.
|
2015-01-05 18:29:25 +07:00
|
|
|
svc_entry MAX_STACK_SIZE
|
2007-12-15 03:56:01 +07:00
|
|
|
#else
|
2005-06-01 04:22:32 +07:00
|
|
|
svc_entry
|
2007-12-15 03:56:01 +07:00
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
|
|
|
@ call emulation code, which returns using r9 if it has emulated
|
|
|
|
@ the instruction, or the more conventional lr if we are to treat
|
|
|
|
@ this as a real undefined instruction
|
|
|
|
@
|
|
|
|
@ r0 - instruction
|
|
|
|
@
|
2012-07-31 01:42:10 +07:00
|
|
|
#ifndef CONFIG_THUMB2_KERNEL
|
2011-06-25 21:44:20 +07:00
|
|
|
ldr r0, [r4, #-4]
|
2009-09-19 05:27:07 +07:00
|
|
|
#else
|
2012-07-31 01:42:10 +07:00
|
|
|
mov r1, #2
|
2011-06-25 21:44:20 +07:00
|
|
|
ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
|
2011-08-19 23:59:27 +07:00
|
|
|
cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
|
2012-07-31 01:42:10 +07:00
|
|
|
blo __und_svc_fault
|
|
|
|
ldrh r9, [r4] @ bottom 16 bits
|
|
|
|
add r4, r4, #2
|
|
|
|
str r4, [sp, #S_PC]
|
|
|
|
orr r0, r9, r0, lsl #16
|
2009-09-19 05:27:07 +07:00
|
|
|
#endif
|
2015-04-21 20:17:25 +07:00
|
|
|
badr r9, __und_svc_finish
|
2011-06-25 21:44:20 +07:00
|
|
|
mov r2, r4
|
2005-04-17 05:20:36 +07:00
|
|
|
bl call_fpe
|
|
|
|
|
2012-07-31 01:42:10 +07:00
|
|
|
mov r1, #4 @ PC correction to apply
|
|
|
|
__und_svc_fault:
|
2005-04-17 05:20:36 +07:00
|
|
|
mov r0, sp @ struct pt_regs *regs
|
2012-07-31 01:42:10 +07:00
|
|
|
bl __und_fault
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2012-07-31 01:42:10 +07:00
|
|
|
__und_svc_finish:
|
2016-08-03 16:33:35 +07:00
|
|
|
get_thread_info tsk
|
2011-06-25 21:44:20 +07:00
|
|
|
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
|
|
|
|
svc_exit r5 @ return from exception
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__und_svc)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.align 5
|
|
|
|
__pabt_svc:
|
2005-06-01 04:22:32 +07:00
|
|
|
svc_entry
|
2009-09-25 19:39:47 +07:00
|
|
|
mov r2, sp @ regs
|
2011-06-26 18:37:35 +07:00
|
|
|
pabt_helper
|
2011-06-25 21:44:20 +07:00
|
|
|
svc_exit r5 @ return from exception
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__pabt_svc)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-09-17 23:12:06 +07:00
|
|
|
.align 5
|
|
|
|
__fiq_svc:
|
|
|
|
svc_entry trace=0
|
|
|
|
mov r0, sp @ struct pt_regs *regs
|
|
|
|
bl handle_fiq_as_nmi
|
|
|
|
svc_exit_via_fiq
|
|
|
|
UNWIND(.fnend )
|
|
|
|
ENDPROC(__fiq_svc)
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
.align 5
|
2005-06-01 00:02:00 +07:00
|
|
|
.LCcralign:
|
|
|
|
.word cr_alignment
|
2008-04-19 04:43:07 +07:00
|
|
|
#ifdef MULTI_DABORT
|
2005-04-17 05:20:36 +07:00
|
|
|
.LCprocfns:
|
|
|
|
.word processor
|
|
|
|
#endif
|
|
|
|
.LCfp:
|
|
|
|
.word fp_enter
|
|
|
|
|
2014-09-17 23:12:06 +07:00
|
|
|
/*
|
|
|
|
* Abort mode handlers
|
|
|
|
*/
|
|
|
|
|
|
|
|
@
|
|
|
|
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
|
|
|
|
@ and reuses the same macros. However in abort mode we must also
|
|
|
|
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
|
|
|
|
@
|
|
|
|
.align 5
|
|
|
|
__fiq_abt:
|
|
|
|
svc_entry trace=0
|
|
|
|
|
|
|
|
ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( msr cpsr_c, r0 )
|
|
|
|
mov r1, lr @ Save lr_abt
|
|
|
|
mrs r2, spsr @ Save spsr_abt, abort is now safe
|
|
|
|
ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( msr cpsr_c, r0 )
|
|
|
|
stmfd sp!, {r1 - r2}
|
|
|
|
|
|
|
|
add r0, sp, #8 @ struct pt_regs *regs
|
|
|
|
bl handle_fiq_as_nmi
|
|
|
|
|
|
|
|
ldmfd sp!, {r1 - r2}
|
|
|
|
ARM( msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( msr cpsr_c, r0 )
|
|
|
|
mov lr, r1 @ Restore lr_abt, abort is unsafe
|
|
|
|
msr spsr_cxsf, r2 @ Restore spsr_abt
|
|
|
|
ARM( msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
|
|
|
|
THUMB( msr cpsr_c, r0 )
|
|
|
|
|
|
|
|
svc_exit_via_fiq
|
|
|
|
UNWIND(.fnend )
|
|
|
|
ENDPROC(__fiq_abt)
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* User mode handlers
|
2006-01-14 23:18:08 +07:00
|
|
|
*
|
2016-05-10 22:34:27 +07:00
|
|
|
* EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2006-01-14 23:18:08 +07:00
|
|
|
|
2016-05-10 22:34:27 +07:00
|
|
|
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
|
2006-01-14 23:18:08 +07:00
|
|
|
#error "sizeof(struct pt_regs) must be a multiple of 8"
|
|
|
|
#endif
|
|
|
|
|
2015-08-20 16:32:02 +07:00
|
|
|
.macro usr_entry, trace=1, uaccess=1
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnstart )
|
|
|
|
UNWIND(.cantunwind ) @ don't unwind the user space
|
2016-05-10 22:34:27 +07:00
|
|
|
sub sp, sp, #PT_REGS_SIZE
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( stmib sp, {r1 - r12} )
|
|
|
|
THUMB( stmia sp, {r0 - r12} )
|
2005-06-01 04:22:32 +07:00
|
|
|
|
2014-08-28 19:08:14 +07:00
|
|
|
ATRAP( mrc p15, 0, r7, c1, c0, 0)
|
|
|
|
ATRAP( ldr r8, .LCcralign)
|
|
|
|
|
2011-06-25 21:44:20 +07:00
|
|
|
ldmia r0, {r3 - r5}
|
2005-06-01 04:22:32 +07:00
|
|
|
add r0, sp, #S_PC @ here for interlock avoidance
|
2011-06-25 21:44:20 +07:00
|
|
|
mov r6, #-1 @ "" "" "" ""
|
2005-06-01 04:22:32 +07:00
|
|
|
|
2011-06-25 21:44:20 +07:00
|
|
|
str r3, [sp] @ save the "real" r0 copied
|
2005-06-01 04:22:32 +07:00
|
|
|
@ from the exception stack
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-08-28 19:08:14 +07:00
|
|
|
ATRAP( ldr r8, [r8, #0])
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
|
|
|
@ We are now ready to fill in the remaining blanks on the stack:
|
|
|
|
@
|
2011-06-25 21:44:20 +07:00
|
|
|
@ r4 - lr_<exception>, already fixed up for correct return/restart
|
|
|
|
@ r5 - spsr_<exception>
|
|
|
|
@ r6 - orig_r0 (see pt_regs definition in ptrace.h)
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
|
|
|
@ Also, separately save sp_usr and lr_usr
|
|
|
|
@
|
2011-06-25 21:44:20 +07:00
|
|
|
stmia r0, {r4 - r6}
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( stmdb r0, {sp, lr}^ )
|
|
|
|
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2015-08-20 16:32:02 +07:00
|
|
|
.if \uaccess
|
|
|
|
uaccess_disable ip
|
|
|
|
.endif
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
@ Enable the alignment trap while in kernel mode
|
2014-08-28 19:08:14 +07:00
|
|
|
ATRAP( teq r8, r7)
|
|
|
|
ATRAP( mcrne p15, 0, r8, c1, c0, 0)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
@
|
|
|
|
@ Clear FP to mark the first stack frame
|
|
|
|
@
|
|
|
|
zero_fp
|
2011-06-25 23:35:19 +07:00
|
|
|
|
2014-09-17 23:12:06 +07:00
|
|
|
.if \trace
|
2015-07-03 18:42:36 +07:00
|
|
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
2011-06-25 23:35:19 +07:00
|
|
|
bl trace_hardirqs_off
|
|
|
|
#endif
|
2013-03-29 04:54:40 +07:00
|
|
|
ct_user_exit save = 0
|
2014-09-17 23:12:06 +07:00
|
|
|
.endif
|
2005-04-17 05:20:36 +07:00
|
|
|
.endm
|
|
|
|
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
.macro kuser_cmpxchg_check
|
2015-09-22 01:34:28 +07:00
|
|
|
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
#ifndef CONFIG_MMU
|
|
|
|
#warning "NPTL on non MMU needs fixing"
|
|
|
|
#else
|
|
|
|
@ Make sure our user space atomic helper is restarted
|
|
|
|
@ if it was interrupted in a critical region. Here we
|
|
|
|
@ perform a quick test inline since it should be false
|
|
|
|
@ 99.9999% of the time. The rest is done out of line.
|
2011-06-25 21:44:20 +07:00
|
|
|
cmp r4, #TASK_SIZE
|
2011-06-20 10:36:03 +07:00
|
|
|
blhs kuser_cmpxchg64_fixup
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
.endm
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
.align 5
|
|
|
|
__dabt_usr:
|
2015-08-20 16:32:02 +07:00
|
|
|
usr_entry uaccess=0
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
kuser_cmpxchg_check
|
2005-04-17 05:20:36 +07:00
|
|
|
mov r2, sp
|
2011-06-26 22:01:26 +07:00
|
|
|
dabt_helper
|
|
|
|
b ret_from_exception
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__dabt_usr)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.align 5
|
|
|
|
__irq_usr:
|
2005-06-01 04:22:32 +07:00
|
|
|
usr_entry
|
2011-06-26 00:28:19 +07:00
|
|
|
kuser_cmpxchg_check
|
2005-05-22 00:14:44 +07:00
|
|
|
irq_handler
|
2011-06-25 16:57:57 +07:00
|
|
|
get_thread_info tsk
|
2005-04-17 05:20:36 +07:00
|
|
|
mov why, #0
|
2011-06-05 08:24:58 +07:00
|
|
|
b ret_to_user_from_irq
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__irq_usr)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.ltorg
|
|
|
|
|
|
|
|
.align 5
|
|
|
|
__und_usr:
|
2015-08-20 16:32:02 +07:00
|
|
|
usr_entry uaccess=0
|
2011-06-26 00:28:19 +07:00
|
|
|
|
2011-06-25 21:44:20 +07:00
|
|
|
mov r2, r4
|
|
|
|
mov r3, r5
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2012-07-31 01:42:10 +07:00
|
|
|
@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
|
|
|
|
@ faulting instruction depending on Thumb mode.
|
|
|
|
@ r3 = regs->ARM_cpsr
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
2012-07-31 01:42:10 +07:00
|
|
|
@ The emulation code returns using r9 if it has emulated the
|
|
|
|
@ instruction, or the more conventional lr if we are to treat
|
|
|
|
@ this as a real undefined instruction
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
2015-04-21 20:17:25 +07:00
|
|
|
badr r9, ret_from_exception
|
2012-07-31 01:42:10 +07:00
|
|
|
|
2014-04-22 22:14:29 +07:00
|
|
|
@ IRQs must be enabled before attempting to read the instruction from
|
|
|
|
@ user space since that could cause a page/translation fault if the
|
|
|
|
@ page table was modified by another CPU.
|
|
|
|
enable_irq
|
|
|
|
|
2008-04-19 04:43:08 +07:00
|
|
|
tst r3, #PSR_T_BIT @ Thumb mode?
|
2012-07-31 01:42:10 +07:00
|
|
|
bne __und_usr_thumb
|
|
|
|
sub r4, r2, #4 @ ARM instr at LR - 4
|
|
|
|
1: ldrt r0, [r4]
|
2013-02-13 01:59:57 +07:00
|
|
|
ARM_BE8(rev r0, r0) @ little endian instruction
|
|
|
|
|
2015-08-20 16:32:02 +07:00
|
|
|
uaccess_disable ip
|
|
|
|
|
2012-07-31 01:42:10 +07:00
|
|
|
@ r0 = 32-bit ARM instruction which caused the exception
|
|
|
|
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
|
|
|
|
@ r4 = PC value for the faulting instruction
|
|
|
|
@ lr = 32-bit undefined instruction function
|
2015-04-21 20:17:25 +07:00
|
|
|
badr lr, __und_usr_fault_32
|
2012-07-31 01:42:10 +07:00
|
|
|
b call_fpe
|
|
|
|
|
|
|
|
__und_usr_thumb:
|
2008-04-19 04:43:08 +07:00
|
|
|
@ Thumb instruction
|
2012-07-31 01:42:10 +07:00
|
|
|
sub r4, r2, #2 @ First half of thumb instr at LR - 2
|
2011-08-20 00:00:08 +07:00
|
|
|
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
|
|
|
|
/*
|
|
|
|
* Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
|
|
|
|
* can never be supported in a single kernel, this code is not applicable at
|
|
|
|
* all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
|
|
|
|
* made about .arch directives.
|
|
|
|
*/
|
|
|
|
#if __LINUX_ARM_ARCH__ < 7
|
|
|
|
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
|
|
|
|
#define NEED_CPU_ARCHITECTURE
|
|
|
|
ldr r5, .LCcpu_architecture
|
|
|
|
ldr r5, [r5]
|
|
|
|
cmp r5, #CPU_ARCH_ARMv7
|
2012-07-31 01:42:10 +07:00
|
|
|
blo __und_usr_fault_16 @ 16bit undefined instruction
|
2011-08-20 00:00:08 +07:00
|
|
|
/*
|
|
|
|
* The following code won't get run unless the running CPU really is v7, so
|
|
|
|
* coding round the lack of ldrht on older arches is pointless. Temporarily
|
|
|
|
* override the assembler target arch with the minimum required instead:
|
|
|
|
*/
|
|
|
|
.arch armv6t2
|
|
|
|
#endif
|
2012-07-31 01:42:10 +07:00
|
|
|
2: ldrht r5, [r4]
|
2014-01-21 12:45:11 +07:00
|
|
|
ARM_BE8(rev16 r5, r5) @ little endian instruction
|
2011-08-19 23:59:27 +07:00
|
|
|
cmp r5, #0xe800 @ 32bit instruction if xx != 0
|
2015-08-20 16:32:02 +07:00
|
|
|
blo __und_usr_fault_16_pan @ 16bit undefined instruction
|
2012-07-31 01:42:10 +07:00
|
|
|
3: ldrht r0, [r2]
|
2014-01-21 12:45:11 +07:00
|
|
|
ARM_BE8(rev16 r0, r0) @ little endian instruction
|
2015-08-20 16:32:02 +07:00
|
|
|
uaccess_disable ip
|
2008-04-19 04:43:08 +07:00
|
|
|
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
|
2012-07-31 01:42:10 +07:00
|
|
|
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
|
2008-04-19 04:43:08 +07:00
|
|
|
orr r0, r0, r5, lsl #16
|
2015-04-21 20:17:25 +07:00
|
|
|
badr lr, __und_usr_fault_32
|
2012-07-31 01:42:10 +07:00
|
|
|
@ r0 = the two 16-bit Thumb instructions which caused the exception
|
|
|
|
@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
|
|
|
|
@ r4 = PC value for the first 16-bit Thumb instruction
|
|
|
|
@ lr = 32bit undefined instruction function
|
2011-08-20 00:00:08 +07:00
|
|
|
|
|
|
|
#if __LINUX_ARM_ARCH__ < 7
|
|
|
|
/* If the target arch was overridden, change it back: */
|
|
|
|
#ifdef CONFIG_CPU_32v6K
|
|
|
|
.arch armv6k
|
2008-04-19 04:43:08 +07:00
|
|
|
#else
|
2011-08-20 00:00:08 +07:00
|
|
|
.arch armv6
|
|
|
|
#endif
|
|
|
|
#endif /* __LINUX_ARM_ARCH__ < 7 */
|
|
|
|
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
|
2012-07-31 01:42:10 +07:00
|
|
|
b __und_usr_fault_16
|
2008-04-19 04:43:08 +07:00
|
|
|
#endif
|
2012-07-31 01:42:10 +07:00
|
|
|
UNWIND(.fnend)
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__und_usr)
|
2008-04-19 04:43:08 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
2012-07-31 01:42:10 +07:00
|
|
|
* The out of line fixup for the ldrt instructions above.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2015-03-24 16:41:09 +07:00
|
|
|
.pushsection .text.fixup, "ax"
|
2012-06-15 22:49:58 +07:00
|
|
|
.align 2
|
ARM: 8062/1: Modify ldrt fixup handler to re-execute the userspace instruction
We will reach fixup handler when one thread(say cpu0) caused an undefined exception, while another thread(say cpu1) is unmmaping the page.
Fixup handler returns to the next userspace instruction which has caused the undef execption, rather than going to the same instruction.
ARM ARM says that after undefined exception, the PC will be pointing
to the next instruction. ie +4 offset in case of ARM and +2 in case of Thumb
And there is no correction offset passed to vector_stub in case of
undef exception.
File: arch/arm/kernel/entry-armv.S +1085
vector_stub und, UND_MODE
During an undefined exception, in normal scenario(ie when ldrt
instruction does not cause an abort) after resorting the context in
VFP hardware, the PC is modified as show below before jumping to
ret_from_exception which is in r9.
File: arch/arm/vfp/vfphw.S +169
@ The context stored in the VFP hardware is up to date with this thread
vfp_hw_state_valid:
tst r1, #FPEXC_EX
bne process_exception @ might as well handle the pending
@ exception before retrying branch
@ out before setting an FPEXC that
@ stops us reading stuff
VFPFMXR FPEXC, r1 @ Restore FPEXC last
sub r2, r2, #4 @ Retry current instruction - if Thumb
str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
@ else it's one 32-bit instruction, so
@ always subtract 4 from the following
@ instruction address.
But if ldrt results in an abort, we reach the fixup handler and return
to ret_from_execption without correcting the pc.
This patch modifes the fixup handler to re-execute the same instruction which caused undefined execption.
Signed-off-by: Vinayak Menon <vinayakm.list@gmail.com>
Signed-off-by: Arun KS <getarunks@gmail.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2014-05-19 17:43:00 +07:00
|
|
|
4: str r4, [sp, #S_PC] @ retry current instruction
|
2014-06-30 22:29:12 +07:00
|
|
|
ret r9
|
2010-04-19 16:15:03 +07:00
|
|
|
.popsection
|
|
|
|
.pushsection __ex_table,"a"
|
2008-04-19 04:43:08 +07:00
|
|
|
.long 1b, 4b
|
2011-11-23 05:42:12 +07:00
|
|
|
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
|
2008-04-19 04:43:08 +07:00
|
|
|
.long 2b, 4b
|
|
|
|
.long 3b, 4b
|
|
|
|
#endif
|
2010-04-19 16:15:03 +07:00
|
|
|
.popsection
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether the instruction is a co-processor instruction.
|
|
|
|
* If yes, we need to call the relevant co-processor handler.
|
|
|
|
*
|
|
|
|
* Note that we don't do a full check here for the co-processor
|
|
|
|
* instructions; all instructions with bit 27 set are well
|
|
|
|
* defined. The only instructions that should fault are the
|
|
|
|
* co-processor instructions. However, we have to watch out
|
|
|
|
* for the ARM6/ARM7 SWI bug.
|
|
|
|
*
|
2008-01-11 01:16:17 +07:00
|
|
|
* NEON is a special case that has to be handled here. Not all
|
|
|
|
* NEON instructions are co-processor instructions, so we have
|
|
|
|
* to make a special case of checking for them. Plus, there's
|
|
|
|
* five groups of them, so we have a table of mask/opcode pairs
|
|
|
|
* to check against, and if any match then we branch off into the
|
|
|
|
* NEON handler code.
|
|
|
|
*
|
2005-04-17 05:20:36 +07:00
|
|
|
* Emulators may wish to make use of the following registers:
|
2012-07-31 01:42:10 +07:00
|
|
|
* r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
|
|
|
|
* r2 = PC value to resume execution after successful emulation
|
2007-01-07 05:53:48 +07:00
|
|
|
* r9 = normal "successful" return address
|
2012-07-31 01:42:10 +07:00
|
|
|
* r10 = this threads thread_info structure
|
2007-01-07 05:53:48 +07:00
|
|
|
* lr = unrecognised instruction return address
|
2014-04-22 22:14:29 +07:00
|
|
|
* IRQs enabled, FIQs enabled.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2008-04-19 04:43:08 +07:00
|
|
|
@
|
|
|
|
@ Fall-through from Thumb-2 __und_usr
|
|
|
|
@
|
|
|
|
#ifdef CONFIG_NEON
|
2013-02-24 00:53:52 +07:00
|
|
|
get_thread_info r10 @ get current thread
|
2008-04-19 04:43:08 +07:00
|
|
|
adr r6, .LCneon_thumb_opcodes
|
|
|
|
b 2f
|
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
call_fpe:
|
2013-02-24 00:53:52 +07:00
|
|
|
get_thread_info r10 @ get current thread
|
2008-01-11 01:16:17 +07:00
|
|
|
#ifdef CONFIG_NEON
|
2008-04-19 04:43:08 +07:00
|
|
|
adr r6, .LCneon_arm_opcodes
|
2013-02-24 00:53:52 +07:00
|
|
|
2: ldr r5, [r6], #4 @ mask value
|
2008-01-11 01:16:17 +07:00
|
|
|
ldr r7, [r6], #4 @ opcode bits matching in mask
|
2013-02-24 00:53:52 +07:00
|
|
|
cmp r5, #0 @ end mask?
|
|
|
|
beq 1f
|
|
|
|
and r8, r0, r5
|
2008-01-11 01:16:17 +07:00
|
|
|
cmp r8, r7 @ NEON instruction?
|
|
|
|
bne 2b
|
|
|
|
mov r7, #1
|
|
|
|
strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
|
|
|
|
strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
|
|
|
|
b do_vfp @ let VFP handler handle this
|
|
|
|
1:
|
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
|
2008-04-19 04:43:08 +07:00
|
|
|
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
|
2014-06-30 22:29:12 +07:00
|
|
|
reteq lr
|
2005-04-17 05:20:36 +07:00
|
|
|
and r8, r0, #0x00000f00 @ mask out CP number
|
2009-07-24 18:32:54 +07:00
|
|
|
THUMB( lsr r8, r8, #8 )
|
2005-04-17 05:20:36 +07:00
|
|
|
mov r7, #1
|
|
|
|
add r6, r10, #TI_USED_CP
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
|
|
|
|
THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
|
2005-04-17 05:20:36 +07:00
|
|
|
#ifdef CONFIG_IWMMXT
|
|
|
|
@ Test if we need to give access to iWMMXt coprocessors
|
|
|
|
ldr r5, [r10, #TI_FLAGS]
|
|
|
|
rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
|
|
|
|
movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
|
|
|
|
bcs iwmmxt_task_enable
|
|
|
|
#endif
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( add pc, pc, r8, lsr #6 )
|
|
|
|
THUMB( lsl r8, r8, #2 )
|
|
|
|
THUMB( add pc, r8 )
|
|
|
|
nop
|
|
|
|
|
2014-06-30 22:29:12 +07:00
|
|
|
ret.w lr @ CP#0
|
2009-07-24 18:32:54 +07:00
|
|
|
W(b) do_fpe @ CP#1 (FPE)
|
|
|
|
W(b) do_fpe @ CP#2 (FPE)
|
2014-06-30 22:29:12 +07:00
|
|
|
ret.w lr @ CP#3
|
2006-06-28 05:03:03 +07:00
|
|
|
#ifdef CONFIG_CRUNCH
|
|
|
|
b crunch_task_enable @ CP#4 (MaverickCrunch)
|
|
|
|
b crunch_task_enable @ CP#5 (MaverickCrunch)
|
|
|
|
b crunch_task_enable @ CP#6 (MaverickCrunch)
|
|
|
|
#else
|
2014-06-30 22:29:12 +07:00
|
|
|
ret.w lr @ CP#4
|
|
|
|
ret.w lr @ CP#5
|
|
|
|
ret.w lr @ CP#6
|
2006-06-28 05:03:03 +07:00
|
|
|
#endif
|
2014-06-30 22:29:12 +07:00
|
|
|
ret.w lr @ CP#7
|
|
|
|
ret.w lr @ CP#8
|
|
|
|
ret.w lr @ CP#9
|
2005-04-17 05:20:36 +07:00
|
|
|
#ifdef CONFIG_VFP
|
2009-07-24 18:32:54 +07:00
|
|
|
W(b) do_vfp @ CP#10 (VFP)
|
|
|
|
W(b) do_vfp @ CP#11 (VFP)
|
2005-04-17 05:20:36 +07:00
|
|
|
#else
|
2014-06-30 22:29:12 +07:00
|
|
|
ret.w lr @ CP#10 (VFP)
|
|
|
|
ret.w lr @ CP#11 (VFP)
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif
|
2014-06-30 22:29:12 +07:00
|
|
|
ret.w lr @ CP#12
|
|
|
|
ret.w lr @ CP#13
|
|
|
|
ret.w lr @ CP#14 (Debug)
|
|
|
|
ret.w lr @ CP#15 (Control)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2011-08-20 00:00:08 +07:00
|
|
|
#ifdef NEED_CPU_ARCHITECTURE
|
|
|
|
.align 2
|
|
|
|
.LCcpu_architecture:
|
|
|
|
.word __cpu_architecture
|
|
|
|
#endif
|
|
|
|
|
2008-01-11 01:16:17 +07:00
|
|
|
#ifdef CONFIG_NEON
|
|
|
|
.align 6
|
|
|
|
|
2008-04-19 04:43:08 +07:00
|
|
|
.LCneon_arm_opcodes:
|
2008-01-11 01:16:17 +07:00
|
|
|
.word 0xfe000000 @ mask
|
|
|
|
.word 0xf2000000 @ opcode
|
|
|
|
|
|
|
|
.word 0xff100000 @ mask
|
|
|
|
.word 0xf4000000 @ opcode
|
|
|
|
|
2008-04-19 04:43:08 +07:00
|
|
|
.word 0x00000000 @ mask
|
|
|
|
.word 0x00000000 @ opcode
|
|
|
|
|
|
|
|
.LCneon_thumb_opcodes:
|
|
|
|
.word 0xef000000 @ mask
|
|
|
|
.word 0xef000000 @ opcode
|
|
|
|
|
|
|
|
.word 0xff100000 @ mask
|
|
|
|
.word 0xf9000000 @ opcode
|
|
|
|
|
2008-01-11 01:16:17 +07:00
|
|
|
.word 0x00000000 @ mask
|
|
|
|
.word 0x00000000 @ opcode
|
|
|
|
#endif
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
do_fpe:
|
|
|
|
ldr r4, .LCfp
|
|
|
|
add r10, r10, #TI_FPSTATE @ r10 = workspace
|
|
|
|
ldr pc, [r4] @ Call FP module USR entry point
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The FP module is called with these registers set:
|
|
|
|
* r0 = instruction
|
|
|
|
* r2 = PC+4
|
|
|
|
* r9 = normal "successful" return address
|
|
|
|
* r10 = FP workspace
|
|
|
|
* lr = unrecognised FP instruction return address
|
|
|
|
*/
|
|
|
|
|
2010-04-30 16:45:46 +07:00
|
|
|
.pushsection .data
|
2017-07-26 18:49:31 +07:00
|
|
|
.align 2
|
2005-04-17 05:20:36 +07:00
|
|
|
ENTRY(fp_enter)
|
2007-01-07 05:53:48 +07:00
|
|
|
.word no_fp
|
2010-04-30 16:45:46 +07:00
|
|
|
.popsection
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2009-09-19 05:27:07 +07:00
|
|
|
ENTRY(no_fp)
|
2014-06-30 22:29:12 +07:00
|
|
|
ret lr
|
2009-09-19 05:27:07 +07:00
|
|
|
ENDPROC(no_fp)
|
2007-01-07 05:53:48 +07:00
|
|
|
|
2012-07-31 01:42:10 +07:00
|
|
|
__und_usr_fault_32:
|
|
|
|
mov r1, #4
|
|
|
|
b 1f
|
2015-08-20 16:32:02 +07:00
|
|
|
__und_usr_fault_16_pan:
|
|
|
|
uaccess_disable ip
|
2012-07-31 01:42:10 +07:00
|
|
|
__und_usr_fault_16:
|
|
|
|
mov r1, #2
|
2014-04-22 22:14:29 +07:00
|
|
|
1: mov r0, sp
|
2015-04-21 20:17:25 +07:00
|
|
|
badr lr, ret_from_exception
|
2012-07-31 01:42:10 +07:00
|
|
|
b __und_fault
|
|
|
|
ENDPROC(__und_usr_fault_32)
|
|
|
|
ENDPROC(__und_usr_fault_16)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.align 5
|
|
|
|
__pabt_usr:
|
2005-06-01 04:22:32 +07:00
|
|
|
usr_entry
|
2009-09-25 19:39:47 +07:00
|
|
|
mov r2, sp @ regs
|
2011-06-26 18:37:35 +07:00
|
|
|
pabt_helper
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2005-04-17 05:20:36 +07:00
|
|
|
/* fall through */
|
|
|
|
/*
|
|
|
|
* This is the return code to user mode for abort handlers
|
|
|
|
*/
|
|
|
|
ENTRY(ret_from_exception)
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnstart )
|
|
|
|
UNWIND(.cantunwind )
|
2005-04-17 05:20:36 +07:00
|
|
|
get_thread_info tsk
|
|
|
|
mov why, #0
|
|
|
|
b ret_to_user
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__pabt_usr)
|
|
|
|
ENDPROC(ret_from_exception)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-09-17 23:12:06 +07:00
|
|
|
.align 5
|
|
|
|
__fiq_usr:
|
|
|
|
usr_entry trace=0
|
|
|
|
kuser_cmpxchg_check
|
|
|
|
mov r0, sp @ struct pt_regs *regs
|
|
|
|
bl handle_fiq_as_nmi
|
|
|
|
get_thread_info tsk
|
|
|
|
restore_user_regs fast = 0, offset = 0
|
|
|
|
UNWIND(.fnend )
|
|
|
|
ENDPROC(__fiq_usr)
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Register switch for ARMv3 and ARMv4 processors
|
|
|
|
* r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
|
|
|
|
* previous and next are guaranteed not to be the same.
|
|
|
|
*/
|
|
|
|
ENTRY(__switch_to)
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnstart )
|
|
|
|
UNWIND(.cantunwind )
|
2005-04-17 05:20:36 +07:00
|
|
|
add ip, r1, #TI_CPU_SAVE
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
|
|
|
|
THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
|
|
|
|
THUMB( str sp, [ip], #4 )
|
|
|
|
THUMB( str lr, [ip], #4 )
|
2013-06-19 05:23:26 +07:00
|
|
|
ldr r4, [r2, #TI_TP_VALUE]
|
|
|
|
ldr r5, [r2, #TI_TP_VALUE + 4]
|
2010-09-13 22:03:21 +07:00
|
|
|
#ifdef CONFIG_CPU_USE_DOMAINS
|
2015-08-20 03:23:48 +07:00
|
|
|
mrc p15, 0, r6, c3, c0, 0 @ Get domain register
|
|
|
|
str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
|
2006-06-21 19:31:52 +07:00
|
|
|
ldr r6, [r2, #TI_CPU_DOMAIN]
|
2006-01-14 04:05:25 +07:00
|
|
|
#endif
|
2013-06-19 05:23:26 +07:00
|
|
|
switch_tls r1, r4, r5, r3, r7
|
2010-06-08 08:50:33 +07:00
|
|
|
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
|
|
|
ldr r7, [r2, #TI_TASK]
|
|
|
|
ldr r8, =__stack_chk_guard
|
2017-06-30 23:03:59 +07:00
|
|
|
.if (TSK_STACK_CANARY > IMM12_MASK)
|
|
|
|
add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
|
|
|
|
.endif
|
|
|
|
ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
|
2010-06-08 08:50:33 +07:00
|
|
|
#endif
|
2010-09-13 22:03:21 +07:00
|
|
|
#ifdef CONFIG_CPU_USE_DOMAINS
|
2005-04-17 05:20:36 +07:00
|
|
|
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
|
|
|
|
#endif
|
2006-06-21 19:31:52 +07:00
|
|
|
mov r5, r0
|
|
|
|
add r4, r2, #TI_CPU_SAVE
|
|
|
|
ldr r0, =thread_notify_head
|
|
|
|
mov r1, #THREAD_NOTIFY_SWITCH
|
|
|
|
bl atomic_notifier_call_chain
|
2010-06-08 08:50:33 +07:00
|
|
|
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
|
|
|
str r7, [r8]
|
|
|
|
#endif
|
2009-07-24 18:32:54 +07:00
|
|
|
THUMB( mov ip, r4 )
|
2006-06-21 19:31:52 +07:00
|
|
|
mov r0, r5
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
|
|
|
|
THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
|
|
|
|
THUMB( ldr sp, [ip], #4 )
|
|
|
|
THUMB( ldr pc, [ip] )
|
2009-02-16 17:42:09 +07:00
|
|
|
UNWIND(.fnend )
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(__switch_to)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
__INIT
|
2005-04-30 04:08:33 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* User helpers.
|
|
|
|
*
|
|
|
|
* Each segment is 32-byte aligned and will be moved to the top of the high
|
|
|
|
* vector page. New segments (if ever needed) must be added in front of
|
|
|
|
* existing ones. This mechanism should be used only for things that are
|
|
|
|
* really small and justified, and not be abused freely.
|
|
|
|
*
|
2011-06-20 10:36:03 +07:00
|
|
|
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
|
2005-04-30 04:08:33 +07:00
|
|
|
*/
|
2009-07-24 18:32:54 +07:00
|
|
|
THUMB( .arm )
|
2005-04-30 04:08:33 +07:00
|
|
|
|
2006-08-18 23:20:15 +07:00
|
|
|
.macro usr_ret, reg
|
|
|
|
#ifdef CONFIG_ARM_THUMB
|
|
|
|
bx \reg
|
|
|
|
#else
|
2014-06-30 22:29:12 +07:00
|
|
|
ret \reg
|
2006-08-18 23:20:15 +07:00
|
|
|
#endif
|
|
|
|
.endm
|
|
|
|
|
2013-07-04 17:32:04 +07:00
|
|
|
.macro kuser_pad, sym, size
|
|
|
|
.if (. - \sym) & 3
|
|
|
|
.rept 4 - (. - \sym) & 3
|
|
|
|
.byte 0
|
|
|
|
.endr
|
|
|
|
.endif
|
|
|
|
.rept (\size - (. - \sym)) / 4
|
|
|
|
.word 0xe7fddef1
|
|
|
|
.endr
|
|
|
|
.endm
|
|
|
|
|
2013-07-24 00:37:00 +07:00
|
|
|
#ifdef CONFIG_KUSER_HELPERS
|
2005-04-30 04:08:33 +07:00
|
|
|
.align 5
|
|
|
|
.globl __kuser_helper_start
|
|
|
|
__kuser_helper_start:
|
|
|
|
|
2005-12-20 05:20:51 +07:00
|
|
|
/*
|
2011-06-20 10:36:03 +07:00
|
|
|
* Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
|
|
|
|
* kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
|
2005-12-20 05:20:51 +07:00
|
|
|
*/
|
|
|
|
|
2011-06-20 10:36:03 +07:00
|
|
|
__kuser_cmpxchg64: @ 0xffff0f60
|
|
|
|
|
2015-09-22 01:34:28 +07:00
|
|
|
#if defined(CONFIG_CPU_32v6K)
|
2011-06-20 10:36:03 +07:00
|
|
|
|
|
|
|
stmfd sp!, {r4, r5, r6, r7}
|
|
|
|
ldrd r4, r5, [r0] @ load old val
|
|
|
|
ldrd r6, r7, [r1] @ load new val
|
|
|
|
smp_dmb arm
|
|
|
|
1: ldrexd r0, r1, [r2] @ load current val
|
|
|
|
eors r3, r0, r4 @ compare with oldval (1)
|
|
|
|
eoreqs r3, r1, r5 @ compare with oldval (2)
|
|
|
|
strexdeq r3, r6, r7, [r2] @ store newval if eq
|
|
|
|
teqeq r3, #1 @ success?
|
|
|
|
beq 1b @ if no then retry
|
ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
* __fixup_smp_on_up has been modified with support for the
THUMB2_KERNEL case. For THUMB2_KERNEL only, fixups are split
into halfwords in case of misalignment, since we can't rely on
unaligned accesses working before turning the MMU on.
No attempt is made to optimise the aligned case, since the
number of fixups is typically small, and it seems best to keep
the code as simple as possible.
* Add a rotate in the fixup_smp code in order to support
CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
* Add an assembly-time sanity-check to ALT_UP() to ensure that
the content really is the right size (4 bytes).
(No check is done for ALT_SMP(). Possibly, this could be fixed
by splitting the two uses of ALT_SMP() (ALT_SMP...SMP_UP versus
ALT_SMP...SMP_UP_B) into two macros. In the first case,
ALT_SMP needs to expand to >= 4 bytes, not == 4.)
* smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
to macro limitations) has not been modified: the affected
instruction (mov) has no 16-bit encoding, so the correct
instruction size is satisfied in this case.
* A "mode" parameter has been added to smp_dmb:
smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
smp_dmb @ uses W() to ensure 4-byte instructions for ALT_SMP()
This avoids assembly failures due to use of W() inside smp_dmb,
when assembling pure-ARM code in the vectors page.
There might be a better way to achieve this.
* Kconfig: make SMP_ON_UP depend on
(!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
currently assumes little-endian order.)
Tested using a single generic realview kernel on:
ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-12-01 21:39:23 +07:00
|
|
|
smp_dmb arm
|
2011-06-20 10:36:03 +07:00
|
|
|
rsbs r0, r3, #0 @ set returned val and C flag
|
|
|
|
ldmfd sp!, {r4, r5, r6, r7}
|
2012-02-03 17:08:05 +07:00
|
|
|
usr_ret lr
|
2011-06-20 10:36:03 +07:00
|
|
|
|
|
|
|
#elif !defined(CONFIG_SMP)
|
|
|
|
|
|
|
|
#ifdef CONFIG_MMU
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The only thing that can break atomicity in this cmpxchg64
|
|
|
|
* implementation is either an IRQ or a data abort exception
|
|
|
|
* causing another process/thread to be scheduled in the middle of
|
|
|
|
* the critical sequence. The same strategy as for cmpxchg is used.
|
|
|
|
*/
|
|
|
|
stmfd sp!, {r4, r5, r6, lr}
|
|
|
|
ldmia r0, {r4, r5} @ load old val
|
|
|
|
ldmia r1, {r6, lr} @ load new val
|
|
|
|
1: ldmia r2, {r0, r1} @ load current val
|
|
|
|
eors r3, r0, r4 @ compare with oldval (1)
|
|
|
|
eoreqs r3, r1, r5 @ compare with oldval (2)
|
|
|
|
2: stmeqia r2, {r6, lr} @ store newval if eq
|
|
|
|
rsbs r0, r3, #0 @ set return val and C flag
|
|
|
|
ldmfd sp!, {r4, r5, r6, pc}
|
|
|
|
|
|
|
|
.text
|
|
|
|
kuser_cmpxchg64_fixup:
|
|
|
|
@ Called from kuser_cmpxchg_fixup.
|
2011-07-23 05:09:07 +07:00
|
|
|
@ r4 = address of interrupted insn (must be preserved).
|
2011-06-20 10:36:03 +07:00
|
|
|
@ sp = saved regs. r7 and r8 are clobbered.
|
|
|
|
@ 1b = first critical insn, 2b = last critical insn.
|
2011-07-23 05:09:07 +07:00
|
|
|
@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
|
2011-06-20 10:36:03 +07:00
|
|
|
mov r7, #0xffff0fff
|
|
|
|
sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
|
2011-07-23 05:09:07 +07:00
|
|
|
subs r8, r4, r7
|
2011-06-20 10:36:03 +07:00
|
|
|
rsbcss r8, r8, #(2b - 1b)
|
|
|
|
strcs r7, [sp, #S_PC]
|
|
|
|
#if __LINUX_ARM_ARCH__ < 6
|
|
|
|
bcc kuser_cmpxchg32_fixup
|
|
|
|
#endif
|
2014-06-30 22:29:12 +07:00
|
|
|
ret lr
|
2011-06-20 10:36:03 +07:00
|
|
|
.previous
|
|
|
|
|
|
|
|
#else
|
|
|
|
#warning "NPTL on non MMU needs fixing"
|
|
|
|
mov r0, #-1
|
|
|
|
adds r0, r0, #0
|
2006-08-18 23:20:15 +07:00
|
|
|
usr_ret lr
|
2011-06-20 10:36:03 +07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#else
|
|
|
|
#error "incoherent kernel configuration"
|
|
|
|
#endif
|
|
|
|
|
2013-07-04 17:32:04 +07:00
|
|
|
kuser_pad __kuser_cmpxchg64, 64
|
2005-12-20 05:20:51 +07:00
|
|
|
|
|
|
|
__kuser_memory_barrier: @ 0xffff0fa0
|
ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
* __fixup_smp_on_up has been modified with support for the
THUMB2_KERNEL case. For THUMB2_KERNEL only, fixups are split
into halfwords in case of misalignment, since we can't rely on
unaligned accesses working before turning the MMU on.
No attempt is made to optimise the aligned case, since the
number of fixups is typically small, and it seems best to keep
the code as simple as possible.
* Add a rotate in the fixup_smp code in order to support
CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
* Add an assembly-time sanity-check to ALT_UP() to ensure that
the content really is the right size (4 bytes).
(No check is done for ALT_SMP(). Possibly, this could be fixed
by splitting the two uses of ALT_SMP() (ALT_SMP...SMP_UP versus
ALT_SMP...SMP_UP_B) into two macros. In the first case,
ALT_SMP needs to expand to >= 4 bytes, not == 4.)
* smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
to macro limitations) has not been modified: the affected
instruction (mov) has no 16-bit encoding, so the correct
instruction size is satisfied in this case.
* A "mode" parameter has been added to smp_dmb:
smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
smp_dmb @ uses W() to ensure 4-byte instructions for ALT_SMP()
This avoids assembly failures due to use of W() inside smp_dmb,
when assembling pure-ARM code in the vectors page.
There might be a better way to achieve this.
* Kconfig: make SMP_ON_UP depend on
(!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
currently assumes little-endian order.)
Tested using a single generic realview kernel on:
ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-12-01 21:39:23 +07:00
|
|
|
smp_dmb arm
|
2006-08-18 23:20:15 +07:00
|
|
|
usr_ret lr
|
2005-12-20 05:20:51 +07:00
|
|
|
|
2013-07-04 17:32:04 +07:00
|
|
|
kuser_pad __kuser_memory_barrier, 32
|
2005-04-30 04:08:33 +07:00
|
|
|
|
|
|
|
__kuser_cmpxchg: @ 0xffff0fc0
|
|
|
|
|
2015-09-22 01:34:28 +07:00
|
|
|
#if __LINUX_ARM_ARCH__ < 6
|
2005-04-30 04:08:33 +07:00
|
|
|
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
#ifdef CONFIG_MMU
|
|
|
|
|
2005-04-30 04:08:33 +07:00
|
|
|
/*
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
* The only thing that can break atomicity in this cmpxchg
|
|
|
|
* implementation is either an IRQ or a data abort exception
|
|
|
|
* causing another process/thread to be scheduled in the middle
|
|
|
|
* of the critical sequence. To prevent this, code is added to
|
|
|
|
* the IRQ and data abort exception handlers to set the pc back
|
|
|
|
* to the beginning of the critical section if it is found to be
|
|
|
|
* within that critical section (see kuser_cmpxchg_fixup).
|
2005-04-30 04:08:33 +07:00
|
|
|
*/
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
1: ldr r3, [r2] @ load current val
|
|
|
|
subs r3, r3, r0 @ compare with oldval
|
|
|
|
2: streq r1, [r2] @ store newval if eq
|
|
|
|
rsbs r0, r3, #0 @ set return val and C flag
|
|
|
|
usr_ret lr
|
|
|
|
|
|
|
|
.text
|
2011-06-20 10:36:03 +07:00
|
|
|
kuser_cmpxchg32_fixup:
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
@ Called from kuser_cmpxchg_check macro.
|
2011-06-25 21:44:20 +07:00
|
|
|
@ r4 = address of interrupted insn (must be preserved).
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
@ sp = saved regs. r7 and r8 are clobbered.
|
|
|
|
@ 1b = first critical insn, 2b = last critical insn.
|
2011-06-25 21:44:20 +07:00
|
|
|
@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
mov r7, #0xffff0fff
|
|
|
|
sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
|
2011-06-25 21:44:20 +07:00
|
|
|
subs r8, r4, r7
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
rsbcss r8, r8, #(2b - 1b)
|
|
|
|
strcs r7, [sp, #S_PC]
|
2014-06-30 22:29:12 +07:00
|
|
|
ret lr
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
.previous
|
|
|
|
|
2006-02-09 04:19:37 +07:00
|
|
|
#else
|
|
|
|
#warning "NPTL on non MMU needs fixing"
|
|
|
|
mov r0, #-1
|
|
|
|
adds r0, r0, #0
|
2006-08-18 23:20:15 +07:00
|
|
|
usr_ret lr
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
#endif
|
2005-04-30 04:08:33 +07:00
|
|
|
|
|
|
|
#else
|
|
|
|
|
ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
* __fixup_smp_on_up has been modified with support for the
THUMB2_KERNEL case. For THUMB2_KERNEL only, fixups are split
into halfwords in case of misalignment, since we can't rely on
unaligned accesses working before turning the MMU on.
No attempt is made to optimise the aligned case, since the
number of fixups is typically small, and it seems best to keep
the code as simple as possible.
* Add a rotate in the fixup_smp code in order to support
CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
* Add an assembly-time sanity-check to ALT_UP() to ensure that
the content really is the right size (4 bytes).
(No check is done for ALT_SMP(). Possibly, this could be fixed
by splitting the two uses of ALT_SMP() (ALT_SMP...SMP_UP versus
ALT_SMP...SMP_UP_B) into two macros. In the first case,
ALT_SMP needs to expand to >= 4 bytes, not == 4.)
* smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
to macro limitations) has not been modified: the affected
instruction (mov) has no 16-bit encoding, so the correct
instruction size is satisfied in this case.
* A "mode" parameter has been added to smp_dmb:
smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
smp_dmb @ uses W() to ensure 4-byte instructions for ALT_SMP()
This avoids assembly failures due to use of W() inside smp_dmb,
when assembling pure-ARM code in the vectors page.
There might be a better way to achieve this.
* Kconfig: make SMP_ON_UP depend on
(!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
currently assumes little-endian order.)
Tested using a single generic realview kernel on:
ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-12-01 21:39:23 +07:00
|
|
|
smp_dmb arm
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so as to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
1: ldrex r3, [r2]
|
2005-04-30 04:08:33 +07:00
|
|
|
subs r3, r3, r0
|
|
|
|
strexeq r3, r1, [r2]
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so as to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
teqeq r3, #1
|
|
|
|
beq 1b
|
2005-04-30 04:08:33 +07:00
|
|
|
rsbs r0, r3, #0
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so as to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-20 23:20:29 +07:00
|
|
|
/* beware -- each __kuser slot must be 8 instructions max */
|
2010-09-04 16:47:48 +07:00
|
|
|
ALT_SMP(b __kuser_memory_barrier)
|
|
|
|
ALT_UP(usr_ret lr)
|
2005-04-30 04:08:33 +07:00
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2013-07-04 17:32:04 +07:00
|
|
|
kuser_pad __kuser_cmpxchg, 32
|
2005-04-30 04:08:33 +07:00
|
|
|
|
|
|
|
__kuser_get_tls: @ 0xffff0fe0
|
2010-07-05 20:53:10 +07:00
|
|
|
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
|
2006-08-18 23:20:15 +07:00
|
|
|
usr_ret lr
|
2010-07-05 20:53:10 +07:00
|
|
|
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
|
2013-07-04 17:32:04 +07:00
|
|
|
kuser_pad __kuser_get_tls, 16
|
|
|
|
.rep 3
|
2010-07-05 20:53:10 +07:00
|
|
|
.word 0 @ 0xffff0ff0 software TLS value, then
|
|
|
|
.endr @ pad up to __kuser_helper_version
|
2005-04-30 04:08:33 +07:00
|
|
|
|
|
|
|
__kuser_helper_version: @ 0xffff0ffc
|
|
|
|
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
|
|
|
|
|
|
|
|
.globl __kuser_helper_end
|
|
|
|
__kuser_helper_end:
|
|
|
|
|
2013-07-24 00:37:00 +07:00
|
|
|
#endif
|
|
|
|
|
2009-07-24 18:32:54 +07:00
|
|
|
THUMB( .thumb )
|
2005-04-30 04:08:33 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Vector stubs.
|
|
|
|
*
|
2013-07-04 17:40:32 +07:00
|
|
|
* This code is copied to 0xffff1000 so we can use branches in the
|
|
|
|
* vectors, rather than ldr's. Note that this code must not exceed
|
|
|
|
* a page size.
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
|
|
|
* Common stub entry macro:
|
|
|
|
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
|
2005-06-01 04:22:32 +07:00
|
|
|
*
|
|
|
|
* SP points to a minimal amount of processor-private memory, the address
|
|
|
|
* of which is copied into r0 for the mode specific abort handler.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2005-11-06 21:42:37 +07:00
|
|
|
.macro vector_stub, name, mode, correction=0
|
2005-04-17 05:20:36 +07:00
|
|
|
.align 5
|
|
|
|
|
|
|
|
vector_\name:
|
|
|
|
.if \correction
|
|
|
|
sub lr, lr, #\correction
|
|
|
|
.endif
|
2005-06-01 04:22:32 +07:00
|
|
|
|
|
|
|
@
|
|
|
|
@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
|
|
|
|
@ (parent CPSR)
|
|
|
|
@
|
|
|
|
stmia sp, {r0, lr} @ save r0, lr
|
2005-04-17 05:20:36 +07:00
|
|
|
mrs lr, spsr
|
2005-06-01 04:22:32 +07:00
|
|
|
str lr, [sp, #8] @ save spsr
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
2005-06-01 04:22:32 +07:00
|
|
|
@ Prepare for SVC32 mode. IRQs remain disabled.
|
2005-04-17 05:20:36 +07:00
|
|
|
@
|
2005-06-01 04:22:32 +07:00
|
|
|
mrs r0, cpsr
|
2009-07-24 18:32:54 +07:00
|
|
|
eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
|
2005-06-01 04:22:32 +07:00
|
|
|
msr spsr_cxsf, r0
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-06-01 04:22:32 +07:00
|
|
|
@
|
|
|
|
@ the branch table must immediately follow this code
|
|
|
|
@
|
|
|
|
and lr, lr, #0x0f
|
2009-07-24 18:32:54 +07:00
|
|
|
THUMB( adr r0, 1f )
|
|
|
|
THUMB( ldr lr, [r0, lr, lsl #2] )
|
2005-11-06 21:42:37 +07:00
|
|
|
mov r0, sp
|
2009-07-24 18:32:54 +07:00
|
|
|
ARM( ldr lr, [pc, lr, lsl #2] )
|
2005-06-01 04:22:32 +07:00
|
|
|
movs pc, lr @ branch to handler in SVC mode
|
2008-08-28 17:22:32 +07:00
|
|
|
ENDPROC(vector_\name)
|
2009-07-24 18:32:52 +07:00
|
|
|
|
|
|
|
.align 2
|
|
|
|
@ handler addresses follow this label
|
|
|
|
1:
|
2005-04-17 05:20:36 +07:00
|
|
|
.endm
|
|
|
|
|
2013-07-04 18:03:31 +07:00
|
|
|
.section .stubs, "ax", %progbits
|
2013-07-04 17:40:32 +07:00
|
|
|
@ This must be the first word
|
|
|
|
.word vector_swi
|
|
|
|
|
|
|
|
vector_rst:
|
|
|
|
ARM( swi SYS_ERROR0 )
|
|
|
|
THUMB( svc #0 )
|
|
|
|
THUMB( nop )
|
|
|
|
b vector_und
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Interrupt dispatcher
|
|
|
|
*/
|
2005-11-06 21:42:37 +07:00
|
|
|
vector_stub irq, IRQ_MODE, 4
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.long __irq_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __irq_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __irq_invalid @ 4
|
|
|
|
.long __irq_invalid @ 5
|
|
|
|
.long __irq_invalid @ 6
|
|
|
|
.long __irq_invalid @ 7
|
|
|
|
.long __irq_invalid @ 8
|
|
|
|
.long __irq_invalid @ 9
|
|
|
|
.long __irq_invalid @ a
|
|
|
|
.long __irq_invalid @ b
|
|
|
|
.long __irq_invalid @ c
|
|
|
|
.long __irq_invalid @ d
|
|
|
|
.long __irq_invalid @ e
|
|
|
|
.long __irq_invalid @ f
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Data abort dispatcher
|
|
|
|
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
|
|
|
|
*/
|
2005-11-06 21:42:37 +07:00
|
|
|
vector_stub dabt, ABT_MODE, 8
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.long __dabt_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __dabt_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __dabt_invalid @ 4
|
|
|
|
.long __dabt_invalid @ 5
|
|
|
|
.long __dabt_invalid @ 6
|
|
|
|
.long __dabt_invalid @ 7
|
|
|
|
.long __dabt_invalid @ 8
|
|
|
|
.long __dabt_invalid @ 9
|
|
|
|
.long __dabt_invalid @ a
|
|
|
|
.long __dabt_invalid @ b
|
|
|
|
.long __dabt_invalid @ c
|
|
|
|
.long __dabt_invalid @ d
|
|
|
|
.long __dabt_invalid @ e
|
|
|
|
.long __dabt_invalid @ f
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Prefetch abort dispatcher
|
|
|
|
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
|
|
|
|
*/
|
2005-11-06 21:42:37 +07:00
|
|
|
vector_stub pabt, ABT_MODE, 4
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.long __pabt_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __pabt_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __pabt_invalid @ 4
|
|
|
|
.long __pabt_invalid @ 5
|
|
|
|
.long __pabt_invalid @ 6
|
|
|
|
.long __pabt_invalid @ 7
|
|
|
|
.long __pabt_invalid @ 8
|
|
|
|
.long __pabt_invalid @ 9
|
|
|
|
.long __pabt_invalid @ a
|
|
|
|
.long __pabt_invalid @ b
|
|
|
|
.long __pabt_invalid @ c
|
|
|
|
.long __pabt_invalid @ d
|
|
|
|
.long __pabt_invalid @ e
|
|
|
|
.long __pabt_invalid @ f
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Undef instr entry dispatcher
|
|
|
|
* Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
|
|
|
|
*/
|
2005-11-06 21:42:37 +07:00
|
|
|
vector_stub und, UND_MODE
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.long __und_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __und_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __und_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __und_invalid @ 4
|
|
|
|
.long __und_invalid @ 5
|
|
|
|
.long __und_invalid @ 6
|
|
|
|
.long __und_invalid @ 7
|
|
|
|
.long __und_invalid @ 8
|
|
|
|
.long __und_invalid @ 9
|
|
|
|
.long __und_invalid @ a
|
|
|
|
.long __und_invalid @ b
|
|
|
|
.long __und_invalid @ c
|
|
|
|
.long __und_invalid @ d
|
|
|
|
.long __und_invalid @ e
|
|
|
|
.long __und_invalid @ f
|
|
|
|
|
|
|
|
.align 5
|
|
|
|
|
2013-07-04 17:40:32 +07:00
|
|
|
/*=============================================================================
|
|
|
|
* Address exception handler
|
|
|
|
*-----------------------------------------------------------------------------
|
|
|
|
* These aren't too critical.
|
|
|
|
* (they're not supposed to happen, and won't happen in 32-bit data mode).
|
|
|
|
*/
|
|
|
|
|
|
|
|
vector_addrexcptn:
|
|
|
|
b vector_addrexcptn
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*=============================================================================
|
2014-09-17 23:12:06 +07:00
|
|
|
* FIQ "NMI" handler
|
2005-04-17 05:20:36 +07:00
|
|
|
*-----------------------------------------------------------------------------
|
2014-09-17 23:12:06 +07:00
|
|
|
 * Handle a FIQ using the SVC stack, allowing FIQ to act like NMI on x86
|
|
|
|
* systems.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2014-09-17 23:12:06 +07:00
|
|
|
vector_stub fiq, FIQ_MODE, 4
|
|
|
|
|
|
|
|
.long __fiq_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __fiq_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __fiq_svc @ 4
|
|
|
|
.long __fiq_svc @ 5
|
|
|
|
.long __fiq_svc @ 6
|
|
|
|
.long __fiq_abt @ 7
|
|
|
|
.long __fiq_svc @ 8
|
|
|
|
.long __fiq_svc @ 9
|
|
|
|
.long __fiq_svc @ a
|
|
|
|
.long __fiq_svc @ b
|
|
|
|
.long __fiq_svc @ c
|
|
|
|
.long __fiq_svc @ d
|
|
|
|
.long __fiq_svc @ e
|
|
|
|
.long __fiq_svc @ f
|
2005-04-17 05:20:36 +07:00
|
|
|
|
ARM: 8515/2: move .vectors and .stubs sections back into the kernel VMA
Commit b9b32bf70f2f ("ARM: use linker magic for vectors and vector stubs")
updated the linker script to emit the .vectors and .stubs sections into a
VMA range that is zero based and disjoint from the normal static kernel
region. The reason for that was that this way, the sections can be placed
exactly 4 KB apart, while the payload of the .vectors section is only 32
bytes.
Since the symbols that are part of the .stubs section are emitted into the
kallsyms table, they appear with zero based addresses as well, e.g.,
00001004 t vector_rst
00001020 t vector_irq
000010a0 t vector_dabt
00001120 t vector_pabt
000011a0 t vector_und
00001220 t vector_addrexcptn
00001240 t vector_fiq
00001240 T vector_fiq_offset
As this confuses perf when it accesses the kallsyms tables, commit
7122c3e9154b ("scripts/link-vmlinux.sh: only filter kernel symbols for
arm") implemented a somewhat ugly special case for ARM, where the value
of CONFIG_PAGE_OFFSET is passed to scripts/kallsyms, and symbols whose
addresses are below it are filtered out. Note that this special case only
applies to CONFIG_XIP_KERNEL=n, not because the issue the patch addresses
exists only in that case, but because finding a limit below which to apply
the filtering is not entirely straightforward.
Since the .vectors and .stubs sections contain position independent code
that is never executed in place, we can emit it at its most likely runtime
VMA (for more recent CPUs), which is 0xffff0000 for the vector table and
0xffff1000 for the stubs. Not only does this fix the perf issue with
kallsyms, allowing us to drop the special case in scripts/kallsyms
entirely, it also gives debuggers a more realistic view of the address
space, and setting breakpoints or single stepping through code in the
vector table or the stubs is more likely to work as expected on CPUs that
use a high vector address. E.g.,
00001240 A vector_fiq_offset
...
c0c35000 T __init_begin
c0c35000 T __vectors_start
c0c35020 T __stubs_start
c0c35020 T __vectors_end
c0c352e0 T _sinittext
c0c352e0 T __stubs_end
...
ffff1004 t vector_rst
ffff1020 t vector_irq
ffff10a0 t vector_dabt
ffff1120 t vector_pabt
ffff11a0 t vector_und
ffff1220 t vector_addrexcptn
ffff1240 T vector_fiq
(Note that vector_fiq_offset is now an absolute symbol, which kallsyms
already ignores by default)
The LMA footprint is identical with or without this change, only the VMAs
are different:
Before:
Idx Name Size VMA LMA File off Algn
...
14 .notes 00000024 c0c34020 c0c34020 00a34020 2**2
CONTENTS, ALLOC, LOAD, READONLY, CODE
15 .vectors 00000020 00000000 c0c35000 00a40000 2**1
CONTENTS, ALLOC, LOAD, READONLY, CODE
16 .stubs 000002c0 00001000 c0c35020 00a41000 2**5
CONTENTS, ALLOC, LOAD, READONLY, CODE
17 .init.text 0006b1b8 c0c352e0 c0c352e0 00a452e0 2**5
CONTENTS, ALLOC, LOAD, READONLY, CODE
...
After:
Idx Name Size VMA LMA File off Algn
...
14 .notes 00000024 c0c34020 c0c34020 00a34020 2**2
CONTENTS, ALLOC, LOAD, READONLY, CODE
15 .vectors 00000020 ffff0000 c0c35000 00a40000 2**1
CONTENTS, ALLOC, LOAD, READONLY, CODE
16 .stubs 000002c0 ffff1000 c0c35020 00a41000 2**5
CONTENTS, ALLOC, LOAD, READONLY, CODE
17 .init.text 0006b1b8 c0c352e0 c0c352e0 00a452e0 2**5
CONTENTS, ALLOC, LOAD, READONLY, CODE
...
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Chris Brandt <chris.brandt@renesas.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2016-02-10 17:41:08 +07:00
|
|
|
.globl vector_fiq
|
2013-07-09 07:03:17 +07:00
|
|
|
|
2013-07-04 18:03:31 +07:00
|
|
|
.section .vectors, "ax", %progbits
|
2016-02-05 16:04:47 +07:00
|
|
|
.L__vectors_start:
|
2013-07-04 18:03:31 +07:00
|
|
|
W(b) vector_rst
|
|
|
|
W(b) vector_und
|
2016-02-05 16:04:47 +07:00
|
|
|
W(ldr) pc, .L__vectors_start + 0x1000
|
2013-07-04 18:03:31 +07:00
|
|
|
W(b) vector_pabt
|
|
|
|
W(b) vector_dabt
|
|
|
|
W(b) vector_addrexcptn
|
|
|
|
W(b) vector_irq
|
|
|
|
W(b) vector_fiq
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.data
|
2017-07-26 18:49:31 +07:00
|
|
|
.align 2
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
.globl cr_alignment
|
|
|
|
cr_alignment:
|
|
|
|
.space 4
|
2010-12-13 15:42:34 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
|
|
|
.globl handle_arch_irq
|
|
|
|
handle_arch_irq:
|
|
|
|
.space 4
|
|
|
|
#endif
|