[S390] Get rid of HOLES_IN_ZONE requirement.
Align everything to MAX_ORDER so we can get rid of the extra checks.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 5c699714d0
commit 9f4b0ba81f
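The hunks below align every memory chunk to a MAX_ORDER boundary so the page allocator never sees a partially covered pageblock. As a rough illustration of that rounding, here is a minimal user-space sketch of the arithmetic the new setup_memory_end() loop performs; the MAX_ORDER and PAGE_SHIFT values, the struct and function names, and the sample chunk are assumptions for illustration, not taken from the kernel headers.

/*
 * Illustrative sketch (not kernel code): round a chunk's start up and its
 * end down to a MAX_ORDER boundary, and drop the chunk if nothing is left.
 * MAX_ORDER = 11 and PAGE_SHIFT = 12 are assumed example values.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT	12	/* 4 KB pages, as on s390 */
#define MAX_ORDER	11	/* assumed default value */

struct chunk { unsigned long addr, size; };

static void align_chunk(struct chunk *c)
{
	unsigned long align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);	/* 4 MB here */
	unsigned long start = (c->addr + align - 1) & ~(align - 1);	/* round up */
	unsigned long end = (c->addr + c->size) & ~(align - 1);	/* round down */

	if (start >= end)
		memset(c, 0, sizeof(*c));	/* chunk too small: becomes a hole */
	else {
		c->addr = start;
		c->size = end - start;
	}
}

int main(void)
{
	struct chunk c = { 0x00500000UL, 0x01000000UL };	/* 16 MB starting at 5 MB */

	align_chunk(&c);
	/* start rounds up to 8 MB, end rounds down to 20 MB: 12 MB remain */
	printf("addr=%#lx size=%#lx\n", c.addr, c.size);
	return 0;
}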
@@ -276,9 +276,6 @@ source "kernel/Kconfig.preempt"
 
 source "mm/Kconfig"
 
-config HOLES_IN_ZONE
-	def_bool y
-
 comment "I/O subsystem configuration"
 
 config MACHCHK_WARNING
@@ -559,7 +559,9 @@ setup_resources(void)
 	data_resource.start = (unsigned long) &_etext;
 	data_resource.end = (unsigned long) &_edata - 1;
 
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		if (!memory_chunk[i].size)
+			continue;
 		res = alloc_bootmem_low(sizeof(struct resource));
 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 		switch (memory_chunk[i].type) {
@@ -630,6 +632,27 @@ static void __init setup_memory_end(void)
 	max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
 	memory_end = min(max_mem, memory_end);
 
+	/*
+	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
+	 * extra checks that HOLES_IN_ZONE would require.
+	 */
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
+		unsigned long start, end;
+		struct mem_chunk *chunk;
+		unsigned long align;
+
+		chunk = &memory_chunk[i];
+		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
+		start = (chunk->addr + align - 1) & ~(align - 1);
+		end = (chunk->addr + chunk->size) & ~(align - 1);
+		if (start >= end)
+			memset(chunk, 0, sizeof(*chunk));
+		else {
+			chunk->addr = start;
+			chunk->size = end - start;
+		}
+	}
+
 	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		struct mem_chunk *chunk = &memory_chunk[i];
 
@@ -376,7 +376,7 @@ static int __init vmem_convert_memory_chunk(void)
 	int i;
 
 	mutex_lock(&vmem_mutex);
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+	for (i = 0; i < MEMORY_CHUNKS; i++) {
 		if (!memory_chunk[i].size)
 			continue;
 		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
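The loop changes in setup_resources() and vmem_convert_memory_chunk() follow from the alignment pass above: once a chunk in the middle of memory_chunk[] can be zeroed out, a loop that stops at the first zero-size entry would drop every chunk behind the hole, so the loops now walk the whole array and skip empty entries. The following stand-alone sketch contrasts the two patterns; the array size and chunk values are made up for illustration and are not taken from the commit.

/*
 * Illustrative sketch (not kernel code): why the loops now skip empty
 * chunks instead of terminating at the first one.  Imagine the middle
 * chunk was zeroed by the alignment pass in setup_memory_end().
 */
#include <stdio.h>

#define MEMORY_CHUNKS 3

struct chunk { unsigned long addr, size; };

static struct chunk memory_chunk[MEMORY_CHUNKS] = {
	{ 0x00000000UL, 0x10000000UL },
	{ 0, 0 },			/* hole left by the alignment pass */
	{ 0x20000000UL, 0x10000000UL },
};

int main(void)
{
	int i, seen_old = 0, seen_new = 0;

	/* old pattern: stops at the hole and misses the last chunk */
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		seen_old++;

	/* new pattern: walks the whole array and skips empty entries */
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seen_new++;
	}

	printf("old pattern sees %d chunk(s), new pattern sees %d\n",
	       seen_old, seen_new);
	return 0;
}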