[S390] Change vmalloc definitions
Currently the vmalloc area starts at a dynamic address that depends on the memory size. There was also an 8MB security hole after the physical memory to catch out-of-bounds accesses.

We can simplify the code by putting the vmalloc area explicitly at the top of the kernel mapping and setting the vmalloc size to a fixed value of 128MB/128GB for 31 bit/64 bit systems. Part of the vmalloc area will be used for the vmem_map. This leaves an area of 96MB/1GB for normal vmalloc allocations.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
parent 8ffd74a092
commit 5fd9c6e214
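A quick way to see where the commit message's 128MB/96MB and 128GB/1GB figures come from is to recompute them from the constants this patch adds to include/asm-s390/pgtable.h. The following standalone C sketch is not part of the patch; it only redoes that arithmetic with the patch's VMALLOC_START/VMALLOC_END/VMEM_MAP_MAX values:

	/* Recompute the reserved sizes from the new fixed layout constants. */
	#include <stdio.h>

	int main(void)
	{
		/* 31 bit constants from the new pgtable.h */
		unsigned long long start31 = 0x78000000ULL;   /* VMALLOC_START */
		unsigned long long end31   = 0x7e000000ULL;   /* VMALLOC_END   */
		unsigned long long max31   = 0x80000000ULL;   /* VMEM_MAP_MAX  */

		/* 64 bit constants */
		unsigned long long start64 = 0x3e000000000ULL;
		unsigned long long end64   = 0x3e040000000ULL;
		unsigned long long max64   = 0x40000000000ULL;

		/* vmalloc proper: 96MB / 1GB; total reserved on top: 128MB / 128GB */
		printf("31 bit: vmalloc %lluMB, total %lluMB\n",
		       (end31 - start31) >> 20, (max31 - start31) >> 20);
		printf("64 bit: vmalloc %lluGB, total %lluGB\n",
		       (end64 - start64) >> 30, (max64 - start64) >> 30);
		return 0;
	}

The region between VMALLOC_END and VMEM_MAP_MAX is what the vmem_map (the struct page array) may grow into.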
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -617,7 +617,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
 static void __init setup_memory_end(void)
 {
 	unsigned long memory_size;
-	unsigned long max_mem, max_phys;
+	unsigned long max_mem;
 	int i;
 
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
@@ -625,10 +625,10 @@ static void __init setup_memory_end(void)
 		memory_end = ZFCPDUMP_HSA_SIZE;
 #endif
 	memory_size = 0;
-	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
 	memory_end &= PAGE_MASK;
 
-	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
+	max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
+	memory_end = min(max_mem, memory_end);
 
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		struct mem_chunk *chunk = &memory_chunk[i];
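The clamping above got simpler: instead of deriving max_phys from VMALLOC_END_INIT and VMALLOC_MIN_SIZE, usable physical memory is now simply capped at the fixed VMALLOC_START, so the identity mapping can never collide with the vmalloc area. A minimal standalone sketch of that calculation (31 bit VMALLOC_START taken from this patch; memory_end == 0 stands for "no explicit limit", e.g. no mem= parameter):

	#include <stdio.h>

	#define VMALLOC_START 0x78000000UL	/* 31 bit value from this patch */

	/* cap usable memory at VMALLOC_START, honouring an optional limit */
	static unsigned long clamp_memory_end(unsigned long memory_end)
	{
		return memory_end ? (VMALLOC_START < memory_end ?
				     VMALLOC_START : memory_end) : VMALLOC_START;
	}

	int main(void)
	{
		printf("%lx\n", clamp_memory_end(0));          /* 78000000: no limit */
		printf("%lx\n", clamp_memory_end(0x40000000)); /* 40000000: 1GB limit */
		printf("%lx\n", clamp_memory_end(0x7c000000)); /* 78000000: capped */
		return 0;
	}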
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -15,10 +15,6 @@
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
-
-static struct page *vmem_map;
 static DEFINE_MUTEX(vmem_mutex);
 
 struct memory_segment {
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = vmem_map + PFN_DOWN(start);
-	map_end = vmem_map + PFN_DOWN(start + size);
+	map_start = VMEM_MAP + PFN_DOWN(start);
+	map_end = VMEM_MAP + PFN_DOWN(start + size);
 
 	start_addr = (unsigned long) map_start & PAGE_MASK;
 	end_addr = PFN_ALIGN((unsigned long) map_end);
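VMEM_MAP turns the formerly dynamic vmem_map pointer into a fixed virtual array of struct page starting at VMALLOC_END, so the entry for page frame number n is simply VMEM_MAP + n. A hedged, standalone illustration of the address calculation the two changed lines perform (dummy struct page, 31 bit VMALLOC_END from this patch; nothing here is kernel API, and the address is only printed, never dereferenced):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

	struct page { unsigned long flags; };	/* dummy stand-in */

	/* the memmap starts right above the 31 bit VMALLOC_END */
	#define VMEM_MAP	((struct page *) 0x7e000000UL)

	int main(void)
	{
		unsigned long start = 0x10000000UL;	/* some physical address */
		struct page *map_start = VMEM_MAP + PFN_DOWN(start);

		/* the entry for pfn n sits at VMEM_MAP + n */
		printf("pfn %lu -> struct page at %p\n", PFN_DOWN(start),
		       (void *) map_start);
		return 0;
	}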
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	if (seg->start + seg->size >= VMALLOC_START ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 
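The segment bound now checks against the fixed VMALLOC_START instead of max_pfn; the second condition, seg->start + seg->size < seg->start, is the standard unsigned wrap-around test: if adding the size overflowed, the sum ends up below the start. A tiny illustration:

	#include <stdio.h>

	/* 1 if [start, start + size) wraps past the top of the address space */
	static int range_wraps(unsigned long start, unsigned long size)
	{
		return start + size < start;
	}

	int main(void)
	{
		printf("%d\n", range_wraps(0x1000UL, 0x1000UL));	/* 0: fine  */
		printf("%d\n", range_wraps(~0UL - 10, 100));		/* 1: wraps */
		return 0;
	}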
@@ -357,17 +353,15 @@ int add_shared_memory(unsigned long start, unsigned long size)
 
 /*
  * map whole physical memory to virtual memory (identity mapping)
+ * we reserve enough space in the vmalloc area for vmemmap to hotplug
+ * additional memory segments.
  */
 void __init vmem_map_init(void)
 {
-	unsigned long map_size;
 	int i;
 
-	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
-	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
-	vmem_map = (struct page *) vmalloc_end;
-	NODE_DATA(0)->node_mem_map = vmem_map;
+	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
+	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
 }
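The BUILD_BUG_ON turns the layout invariant into a compile-time check: even a memmap covering the maximum mappable memory (VMALLOC_START bytes of RAM) must end below VMEM_MAP_MAX. Here is the same invariant as a standalone C11 static assertion, with a dummy struct page standing in for the real one (the kernel's BUILD_BUG_ON of this era gets the same effect with a negative-size array trick instead):

	#include <assert.h>

	#define PAGE_SIZE	4096UL

	struct page { unsigned long flags; void *lru[2]; };	/* dummy stand-in */

	/* 31 bit constants from this patch */
	#define VMALLOC_START	0x78000000UL
	#define VMALLOC_END	0x7e000000UL
	#define VMEM_MAP_MAX	0x80000000UL
	#define VMEM_MAP_SIZE	((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))

	/* compilation fails if the struct page array would overrun VMEM_MAP_MAX */
	static_assert(VMALLOC_END + VMEM_MAP_SIZE <= VMEM_MAP_MAX,
		      "vmem_map must fit below VMEM_MAP_MAX");

	int main(void) { return 0; }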
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -104,41 +104,27 @@ extern char empty_zero_page[PAGE_SIZE];
 
 #ifndef __ASSEMBLY__
 /*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- * vmalloc area starts at 4GB to prevent syscall table entry exchanging
- * from modules.
- */
-extern unsigned long vmalloc_end;
-
-#ifdef CONFIG_64BIT
-#define VMALLOC_ADDR	(max(0x100000000UL, (unsigned long) high_memory))
-#else
-#define VMALLOC_ADDR	((unsigned long) high_memory)
-#endif
-#define VMALLOC_OFFSET	(8*1024*1024)
-#define VMALLOC_START	((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END	vmalloc_end
-
-/*
- * We need some free virtual space to be able to do vmalloc.
- * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
- * area. On a machine with 2GB memory we make sure that we
- * have at least 128MB free space for vmalloc. On a machine
- * with 4TB we make sure we have at least 128GB.
+ * The vmalloc area will always be on the topmost area of the kernel
+ * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
+ * which should be enough for any sane case.
+ * By putting vmalloc at the top, we maximise the gap between physical
+ * memory and vmalloc to catch misplaced memory accesses. As a side
+ * effect, this also makes sure that 64 bit module code cannot be used
+ * as system call address.
  */
 #ifndef __s390x__
-#define VMALLOC_MIN_SIZE	0x8000000UL
-#define VMALLOC_END_INIT	0x80000000UL
+#define VMALLOC_START	0x78000000UL
+#define VMALLOC_END	0x7e000000UL
+#define VMEM_MAP_MAX	0x80000000UL
 #else /* __s390x__ */
-#define VMALLOC_MIN_SIZE	0x2000000000UL
-#define VMALLOC_END_INIT	0x40000000000UL
+#define VMALLOC_START	0x3e000000000UL
+#define VMALLOC_END	0x3e040000000UL
+#define VMEM_MAP_MAX	0x40000000000UL
 #endif /* __s390x__ */
 
+#define VMEM_MAP	((struct page *) VMALLOC_END)
+#define VMEM_MAP_SIZE	((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  *  |   PFRA          |    |  OS  |