Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-25 11:22:33 +07:00)
Commit 8ad8b72721
This patch ports the Kernel Address SANitizer (KASAN) feature.

Note: the start address of shadow memory is at the beginning of kernel space, which is 2^64 - (2^39 / 2) in SV39. The size of kernel space is 2^38 bytes, so the size of the shadow memory should be 2^38 / 8 bytes. Thus, the shadow memory does not overlap with the fixmap area.

There are currently two limitations in this port:

1. RV64 only: KASAN needs a large address space for the extra shadow memory region.

2. KASAN can't debug modules, since modules are allocated in the VMALLOC area. We map the shadow memory corresponding to the VMALLOC area to kasan_early_shadow_page, because we don't have enough physical space for all of the shadow memory corresponding to the VMALLOC area.

Signed-off-by: Nick Hu <nickhu@andestech.com>
Reported-by: Greentime Hu <green.hu@gmail.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
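The shadow-layout arithmetic in the commit message can be checked with a small standalone C program. This is a sketch for illustration only: the variable names here are made up and are not the kernel's KASAN macros.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* In SV39, kernel space is the top half of the 2^39-byte
             * virtual address space, so it starts at 2^64 - (2^39 / 2)
             * = 2^64 - 2^38. (Names are illustrative, not kernel macros.) */
            uint64_t kernel_space_start = 0ULL - (1ULL << 38);
            uint64_t kernel_space_size  = 1ULL << 38;

            /* KASAN keeps one shadow byte per 8 bytes of memory, so the
             * shadow region for kernel space is 2^38 / 8 = 2^35 bytes. */
            uint64_t shadow_size = kernel_space_size / 8;

            printf("kernel space start: 0x%016llx\n",
                   (unsigned long long)kernel_space_start);
            printf("shadow size:        0x%016llx\n",
                   (unsigned long long)shadow_size);
            return 0;
    }

This prints a start address of 0xffffffc000000000 and a shadow size of 2^35 bytes, matching the figures above.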
109 lines · 2.1 KiB · RISC-V assembly
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Regents of the University of California
 */

#include <linux/linkage.h>
#include <asm/asm.h>

/* void *memcpy(void *, const void *, size_t) */
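/*
 * RISC-V calling convention: a0 = dest, a1 = src, a2 = count, and the
 * return value (dest) goes back in a0, which is why the code below
 * advances t6 rather than a0.
 */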
ENTRY(__memcpy)
WEAK(memcpy)
        move t6, a0  /* Preserve return value */

        /* Defer to byte-oriented copy for small sizes */
        sltiu a3, a2, 128
        bnez a3, 4f
        /* Use word-oriented copy only if low-order bits match */
        andi a3, t6, SZREG-1
        andi a4, a1, SZREG-1
        bne a3, a4, 4f

        beqz a3, 2f  /* Skip if already aligned */
        /*
         * Round to nearest double word-aligned address
         * greater than or equal to start address
         */
        andi a3, a1, ~(SZREG-1)
        addi a3, a3, SZREG
        /* Handle initial misalignment */
        sub a4, a3, a1
1:
        lb a5, 0(a1)
        addi a1, a1, 1
        sb a5, 0(t6)
        addi t6, t6, 1
        bltu a1, a3, 1b
        sub a2, a2, a4  /* Update count */
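
        /*
         * Main copy loop: each pass moves 16*SZREG bytes, batching ten
         * loads into a4-a7/t0-t5 before the matching stores (then six
         * more of each) instead of interleaving single-word loads and
         * stores.
         */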
2:
        andi a4, a2, ~((16*SZREG)-1)
        beqz a4, 4f
        add a3, a1, a4
3:
        REG_L a4, 0(a1)
        REG_L a5, SZREG(a1)
        REG_L a6, 2*SZREG(a1)
        REG_L a7, 3*SZREG(a1)
        REG_L t0, 4*SZREG(a1)
        REG_L t1, 5*SZREG(a1)
        REG_L t2, 6*SZREG(a1)
        REG_L t3, 7*SZREG(a1)
        REG_L t4, 8*SZREG(a1)
        REG_L t5, 9*SZREG(a1)
        REG_S a4, 0(t6)
        REG_S a5, SZREG(t6)
        REG_S a6, 2*SZREG(t6)
        REG_S a7, 3*SZREG(t6)
        REG_S t0, 4*SZREG(t6)
        REG_S t1, 5*SZREG(t6)
        REG_S t2, 6*SZREG(t6)
        REG_S t3, 7*SZREG(t6)
        REG_S t4, 8*SZREG(t6)
        REG_S t5, 9*SZREG(t6)
        REG_L a4, 10*SZREG(a1)
        REG_L a5, 11*SZREG(a1)
        REG_L a6, 12*SZREG(a1)
        REG_L a7, 13*SZREG(a1)
        REG_L t0, 14*SZREG(a1)
        REG_L t1, 15*SZREG(a1)
        addi a1, a1, 16*SZREG
        REG_S a4, 10*SZREG(t6)
        REG_S a5, 11*SZREG(t6)
        REG_S a6, 12*SZREG(t6)
        REG_S a7, 13*SZREG(t6)
        REG_S t0, 14*SZREG(t6)
        REG_S t1, 15*SZREG(t6)
        addi t6, t6, 16*SZREG
        bltu a1, a3, 3b
        andi a2, a2, (16*SZREG)-1  /* Update count */

4:
        /* Handle trailing misalignment */
        beqz a2, 6f
        add a3, a1, a2

        /* Use word-oriented copy if co-aligned to word boundary */
        or a5, a1, t6
        or a5, a5, a3
        andi a5, a5, 3
        bnez a5, 5f
7:
        lw a4, 0(a1)
        addi a1, a1, 4
        sw a4, 0(t6)
        addi t6, t6, 4
        bltu a1, a3, 7b

        ret
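
        /* Byte-by-byte tail copy, used when src, dest, and the end
         * address are not all 4-byte aligned. */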
5:
        lb a4, 0(a1)
        addi a1, a1, 1
        sb a4, 0(t6)
        addi t6, t6, 1
        bltu a1, a3, 5b
6:
        ret
END(__memcpy)
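For readers tracing the control flow, here is a rough C rendering of the same strategy. It is a sketch only, not the kernel's implementation: small or differently-aligned copies go byte by byte, large co-aligned ones take an unrolled register-width loop, and where the assembly additionally word-copies a 4-byte-co-aligned tail (label 7), this sketch stays byte-wise.

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the strategy above; uintptr_t stands in for an
     * SZREG-sized register. Not the kernel implementation. */
    void *memcpy_sketch(void *dest, const void *src, size_t n)
    {
            unsigned char *d = dest;
            const unsigned char *s = src;
            const size_t w = sizeof(uintptr_t);

            /* Word-oriented path only for large copies whose low-order
             * address bits match (the "bne a3, a4, 4f" test). */
            if (n >= 128 && (((uintptr_t)d ^ (uintptr_t)s) & (w - 1)) == 0) {
                    /* Advance byte-wise to a register-width boundary
                     * (label 1). */
                    while ((uintptr_t)s & (w - 1)) {
                            *d++ = *s++;
                            n--;
                    }
                    /* Unrolled main loop: 16 words per pass (labels 2/3). */
                    while (n >= 16 * w) {
                            const uintptr_t *sw = (const uintptr_t *)s;
                            uintptr_t *dw = (uintptr_t *)d;
                            for (int i = 0; i < 16; i++)
                                    dw[i] = sw[i];
                            s += 16 * w;
                            d += 16 * w;
                            n -= 16 * w;
                    }
            }
            /* Trailing bytes (labels 4-6; the assembly also has the
             * word-wise variant at label 7). */
            while (n--)
                    *d++ = *s++;
            return dest;
    }

The 128-byte cutoff mirrors the "sltiu a3, a2, 128" check: below it, the alignment bookkeeping costs more than it saves, so everything goes through the byte loop.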