Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-23 07:26:16 +07:00
Commit 8a7f97b902:

Add check for the return value of memblock_alloc*() functions and call
panic() in case of error. The panic message repeats the one used by
panicking memblock allocators with adjustment of parameters to include
only relevant ones.

The replacement was mostly automated with semantic patches like the one
below with manual massaging of format strings.

@@
expression ptr, size, align;
@@
ptr = memblock_alloc(size, align);
+ if (!ptr)
+	panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, size, align);

[anders.roxell@linaro.org: use '%pa' with 'phys_addr_t' type]
  Link: http://lkml.kernel.org/r/20190131161046.21886-1-anders.roxell@linaro.org
[rppt@linux.ibm.com: fix format strings for panics after memblock_alloc]
  Link: http://lkml.kernel.org/r/1548950940-15145-1-git-send-email-rppt@linux.ibm.com
[rppt@linux.ibm.com: don't panic if the allocation in sparse_buffer_init fails]
  Link: http://lkml.kernel.org/r/20190131074018.GD28876@rapoport-lnx
[akpm@linux-foundation.org: fix xtensa printk warning]
Link: http://lkml.kernel.org/r/1548057848-15136-20-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
Reviewed-by: Guo Ren <ren_guo@c-sky.com> [c-sky]
Acked-by: Paul Burton <paul.burton@mips.com> [MIPS]
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> [s390]
Reviewed-by: Juergen Gross <jgross@suse.com> [Xen]
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Max Filippov <jcmvbkbc@gmail.com> [xtensa]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
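Concretely, the transformation turns a bare memblock_alloc() call into an allocate-check-panic sequence. The sketch below is only an illustration of that pattern (example_init(), SZ_4K and SMP_CACHE_BYTES are placeholder choices, not taken from this commit); the real instance in this file is the NODE_DATA() allocation in numa_setup_memory() further down.

#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/sizes.h>

/* Hypothetical early-boot caller, shown only to illustrate the commit's pattern. */
static void __init example_init(void)
{
        void *ptr;

        ptr = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
        /* New: a failed early allocation now panics with a uniform message. */
        if (!ptr)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, (unsigned long)SZ_4K,
                      (unsigned long)SMP_CACHE_BYTES);
}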
170 lines
3.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * NUMA support for s390
 *
 * Implement NUMA core code.
 *
 * Copyright IBM Corp. 2015
 */

#define KMSG_COMPONENT "numa"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/node.h>

#include <asm/numa.h>
#include "numa_mode.h"

pg_data_t *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

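/* Plain mode (the default): a single node 0 contains all memory and CPUs. */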
static void plain_setup(void)
{
        node_set(0, node_possible_map);
}

const struct numa_mode numa_mode_plain = {
        .name = "plain",
        .setup = plain_setup,
};

static const struct numa_mode *mode = &numa_mode_plain;

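/* Map a page frame number to its node; without a mode-specific hook everything is node 0. */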
int numa_pfn_to_nid(unsigned long pfn)
{
        return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
}

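/* Let the active mode rebuild the CPU-to-node mapping, if it provides a hook for that. */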
void numa_update_cpu_topology(void)
{
        if (mode->update_cpu_topology)
                mode->update_cpu_topology();
}

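/* Distance between two nodes as reported by the active mode, or 0 if it defines none. */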
int __node_distance(int a, int b)
{
        return mode->distance ? mode->distance(a, b) : 0;
}
EXPORT_SYMBOL(__node_distance);

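/* Non-zero once the "numa_debug" early parameter has been parsed (see parse_debug() below). */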
int numa_debug_enabled;

/*
 * numa_setup_memory() - Assign bootmem to nodes
 *
 * The memory is first added to memblock without any respect to nodes.
 * This is fixed before remaining memblock memory is handed over to the
 * buddy allocator.
 * An important side effect is that large bootmem allocations might easily
 * cross node boundaries, which can be needed for large allocations with
 * smaller memory stripes in each node (i.e. when using NUMA emulation).
 *
 * Memory defines nodes:
 * Therefore this routine also sets the nodes online with memory.
 */
static void __init numa_setup_memory(void)
{
        unsigned long cur_base, align, end_of_dram;
        int nid = 0;

        end_of_dram = memblock_end_of_DRAM();
        align = mode->align ? mode->align() : ULONG_MAX;

        /*
         * Step through all available memory and assign it to the nodes
         * indicated by the mode implementation.
         * All nodes which are seen here will be set online.
         */
        cur_base = 0;
        do {
                nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
                node_set_online(nid);
                memblock_set_node(cur_base, align, &memblock.memory, nid);
                cur_base += align;
        } while (cur_base < end_of_dram);

        /* Allocate and fill out node_data */
        for (nid = 0; nid < MAX_NUMNODES; nid++) {
                NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
                if (!NODE_DATA(nid))
                        panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                              __func__, sizeof(pg_data_t), 8);
        }

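        /* Compute each online node's spanned pfn range and record it in node_data. */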
        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                unsigned long t_start, t_end;
                int i;

                start_pfn = ULONG_MAX;
                end_pfn = 0;
                for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
                        if (t_start < start_pfn)
                                start_pfn = t_start;
                        if (t_end > end_pfn)
                                end_pfn = t_end;
                }
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
                NODE_DATA(nid)->node_id = nid;
        }
}

/*
 * numa_setup() - Earliest initialization
 *
 * Assign the mode and call the mode's setup routine.
 */
void __init numa_setup(void)
{
        pr_info("NUMA mode: %s\n", mode->name);
        nodes_clear(node_possible_map);
        /* Initially attach all possible CPUs to node 0. */
        cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
        if (mode->setup)
                mode->setup();
        numa_setup_memory();
        memblock_dump_all();
}

/*
 * numa_init_late() - Initialization initcall
 *
 * Register NUMA nodes.
 */
static int __init numa_init_late(void)
{
        int nid;

        for_each_online_node(nid)
                register_one_node(nid);
        return 0;
}
arch_initcall(numa_init_late);

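/* "numa_debug" early parameter: enable NUMA debug messages. */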
static int __init parse_debug(char *parm)
{
        numa_debug_enabled = 1;
        return 0;
}
early_param("numa_debug", parse_debug);

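/* "numa=" early parameter: select the NUMA mode, "plain" or (with CONFIG_NUMA_EMU) "emu". */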
static int __init parse_numa(char *parm)
{
        if (strcmp(parm, numa_mode_plain.name) == 0)
                mode = &numa_mode_plain;
#ifdef CONFIG_NUMA_EMU
        if (strcmp(parm, numa_mode_emu.name) == 0)
                mode = &numa_mode_emu;
#endif
        return 0;
}
early_param("numa", parse_numa);