mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-13 21:46:53 +07:00)

commit a3c0f84765
Spectre variant 1 attacks are about this sequence of pseudo-code:

	index = load(user-manipulated pointer);
	access(base + index * stride);

In order for the cache side-channel to work, the access() must be made
to memory for which userspace can detect whether cache lines have been
loaded.  On 32-bit ARM, this must be either user accessible memory, or
a kernel mapping of that same user accessible memory.

The problem occurs when the load() speculatively loads privileged data,
and the subsequent access() is made to user accessible memory.

Any load() which makes use of a user-manipulated pointer is a potential
problem if the data it has loaded is used in a subsequent access.  This
also applies for the access() if the data loaded by that access is used
by a subsequent access.

Harden the get_user() accessors against Spectre attacks by forcing out
of bounds addresses to a NULL pointer.  This prevents get_user() being
used as the load() step above.  As a side effect, put_user() will also
be affected even though it isn't implicated.

Also harden copy_from_user() by redoing the bounds check within the
arm_copy_from_user() code, and NULLing the pointer if out of bounds.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
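The attack pattern described above can be written out as a short C sketch. The names below (victim_function, probe_array, sink, STRIDE) are illustrative only, in the style of published Spectre variant 1 examples, and are not identifiers from this tree: the bounds check is architecturally correct, yet a mispredicting CPU may still perform the load with an out-of-range user_index and leak the loaded byte through the cache footprint of the dependent access.

#include <stddef.h>
#include <stdint.h>

#define STRIDE 512	/* one probe slot per possible byte value */

static uint8_t array[16];			/* data the caller may legitimately index       */
static uint8_t probe_array[256 * STRIDE];	/* memory whose cache state userspace can time  */
static uint8_t sink;				/* keeps the dependent access from being elided */

void victim_function(size_t user_index)
{
	if (user_index < sizeof(array)) {		/* architecturally correct bounds check */
		uint8_t value = array[user_index];	/* load(user-manipulated pointer)       */
		sink &= probe_array[value * STRIDE];	/* access(base + index * stride)        */
	}
}

The get_user() hardening described in the commit message removes the first step of that chain: an out-of-bounds user address is forced to NULL before the load can be performed, even speculatively.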
116 lines · 2.2 KiB · ArmAsm
/*
 * linux/arch/arm/lib/copy_from_user.S
 *
 * Author:	Nicolas Pitre
 * Created:	Sep 29, 2005
 * Copyright:	MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

/*
 * Prototype:
 *
 *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
 *
 * Purpose:
 *
 *	copy a block to kernel memory from user memory
 *
 * Params:
 *
 *	to = kernel memory
 *	from = user memory
 *	n = number of bytes to copy
 *
 * Return value:
 *
 *	Number of bytes NOT copied.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define LDR1W_SHIFT	0
#else
#define LDR1W_SHIFT	1
#endif
#define STR1W_SHIFT	0

	.macro ldr1w ptr reg abort
	ldrusr	\reg, \ptr, 4, abort=\abort
	.endm

	.macro ldr4w ptr reg1 reg2 reg3 reg4 abort
	ldr1w \ptr, \reg1, \abort
	ldr1w \ptr, \reg2, \abort
	ldr1w \ptr, \reg3, \abort
	ldr1w \ptr, \reg4, \abort
	.endm

	.macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
	ldr4w \ptr, \reg1, \reg2, \reg3, \reg4, \abort
	ldr4w \ptr, \reg5, \reg6, \reg7, \reg8, \abort
	.endm

	.macro ldr1b ptr reg cond=al abort
	ldrusr	\reg, \ptr, 1, \cond, abort=\abort
	.endm

	.macro str1w ptr reg abort
	W(str) \reg, [\ptr], #4
	.endm

	.macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
	stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8}
	.endm

	.macro str1b ptr reg cond=al abort
	str\cond\()b \reg, [\ptr], #1
	.endm

	.macro enter reg1 reg2
	mov	r3, #0
	stmdb	sp!, {r0, r2, r3, \reg1, \reg2}
	.endm

	.macro usave reg1 reg2
	UNWIND(	.save {r0, r2, r3, \reg1, \reg2}	)
	.endm

	.macro exit reg1 reg2
	add	sp, sp, #8
	ldmfd	sp!, {r0, \reg1, \reg2}
	.endm

	.text

ENTRY(arm_copy_from_user)
#ifdef CONFIG_CPU_SPECTRE
	get_thread_info r3
	ldr	r3, [r3, #TI_ADDR_LIMIT]
	adds	ip, r1, r2	@ ip=addr+size
	sub	r3, r3, #1	@ addr_limit - 1
	cmpcc	ip, r3		@ if (addr+size > addr_limit - 1)
	movcs	r1, #0		@ addr = NULL
	csdb
#endif

#include "copy_template.S"

ENDPROC(arm_copy_from_user)

	.pushsection .fixup,"ax"
	.align 0
	copy_abort_preamble
	ldmfd	sp!, {r1, r2, r3}
	sub	r0, r0, r1
	rsb	r0, r0, r2
	copy_abort_end
	.popsection
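For readers less fluent in ARM assembly, the CONFIG_CPU_SPECTRE prologue of arm_copy_from_user() above can be paraphrased roughly in C as below. This is a sketch only: sanitize_user_range() and the csdb() stub are hypothetical stand-ins for illustration, not the kernel's implementation; addr_limit corresponds to the thread_info addr_limit value loaded via TI_ADDR_LIMIT, and from/n to registers r1/r2.

#include <stddef.h>

/* Stand-in for the kernel's CSDB speculation barrier; assumed here to be
 * no more than a compiler barrier, purely for illustration. */
#define csdb()	__asm__ __volatile__("" ::: "memory")

/*
 * Rough C rendering of the CONFIG_CPU_SPECTRE check above: if from + n
 * overflows, or the range runs past addr_limit - 1, the user pointer is
 * forced to NULL before the copy loop, so a speculatively executed copy
 * cannot dereference an out-of-bounds user-supplied pointer.
 */
static const void *sanitize_user_range(const void *from, unsigned long n,
				       unsigned long addr_limit)
{
	unsigned long addr = (unsigned long)from;
	unsigned long end;

	/* adds ip, r1, r2 : end = addr + n, with the carry recording overflow */
	int overflow = __builtin_add_overflow(addr, n, &end);

	/* cmpcc / movcs   : NULL the pointer if (addr + n > addr_limit - 1) */
	if (overflow || end > addr_limit - 1)
		from = NULL;

	/* csdb            : the sanitised value is honoured even under speculation */
	csdb();

	return from;
}

Because the out-of-bounds pointer is NULLed before copy_template.S runs, a hostile user pointer can no longer act as the load() step of the gadget; any resulting fault is caught by the .fixup section, and the function still returns the number of bytes not copied, as documented in the prototype comment.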