2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* Copyright (C) 1995 Linus Torvalds
|
|
|
|
* Copyright (C) 1995 Waldorf Electronics
|
|
|
|
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
|
|
|
|
* Copyright (C) 1996 Stoned Elipot
|
|
|
|
* Copyright (C) 1999 Silicon Graphics, Inc.
|
2013-01-22 18:59:30 +07:00
|
|
|
* Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/ioport.h>
|
2011-07-24 03:30:40 +07:00
|
|
|
#include <linux/export.h>
|
2006-07-10 18:44:13 +07:00
|
|
|
#include <linux/screen_info.h>
|
2011-12-09 01:22:09 +07:00
|
|
|
#include <linux/memblock.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/initrd.h>
|
|
|
|
#include <linux/root_dev.h>
|
|
|
|
#include <linux/highmem.h>
|
|
|
|
#include <linux/console.h>
|
2006-03-27 16:16:04 +07:00
|
|
|
#include <linux/pfn.h>
|
2007-06-29 22:55:48 +07:00
|
|
|
#include <linux/debugfs.h>
|
2012-10-11 23:14:58 +07:00
|
|
|
#include <linux/kexec.h>
|
2013-04-13 18:15:47 +07:00
|
|
|
#include <linux/sizes.h>
|
2014-07-16 22:51:32 +07:00
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/dma-contiguous.h>
|
2016-05-11 05:50:03 +07:00
|
|
|
#include <linux/decompress/generic.h>
|
2016-11-23 20:43:46 +07:00
|
|
|
#include <linux/of_fdt.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#include <asm/addrspace.h>
|
|
|
|
#include <asm/bootinfo.h>
|
2007-10-23 18:43:11 +07:00
|
|
|
#include <asm/bugs.h>
|
2005-07-13 18:48:45 +07:00
|
|
|
#include <asm/cache.h>
|
2015-01-29 18:14:13 +07:00
|
|
|
#include <asm/cdmm.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/cpu.h>
|
2015-09-23 00:10:55 +07:00
|
|
|
#include <asm/debug.h>
|
2018-06-15 18:08:45 +07:00
|
|
|
#include <asm/dma-coherence.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/setup.h>
|
2007-11-19 19:23:51 +07:00
|
|
|
#include <asm/smp-ops.h>
|
2010-10-13 13:52:46 +07:00
|
|
|
#include <asm/prom.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2015-09-11 21:46:14 +07:00
|
|
|
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
|
|
|
|
const char __section(.appended_dtb) __appended_dtb[0x100000];
|
|
|
|
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
|
|
|
|
|
2005-07-13 18:48:45 +07:00
|
|
|
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
EXPORT_SYMBOL(cpu_data);
|
|
|
|
|
|
|
|
#ifdef CONFIG_VT
|
|
|
|
struct screen_info screen_info;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup information
|
|
|
|
*
|
|
|
|
* These are initialized so they are in the .data section
|
|
|
|
*/
|
2005-07-13 18:48:45 +07:00
|
|
|
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
EXPORT_SYMBOL(mips_machtype);
|
|
|
|
|
|
|
|
struct boot_mem_map boot_mem_map;
|
|
|
|
|
2009-11-22 03:34:41 +07:00
|
|
|
static char __initdata command_line[COMMAND_LINE_SIZE];
|
|
|
|
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
|
|
|
|
|
|
|
|
#ifdef CONFIG_CMDLINE_BOOL
|
|
|
|
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
|
|
|
|
#endif
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* mips_io_port_base is the begin of the address space to which x86 style
|
|
|
|
* I/O ports are mapped.
|
|
|
|
*/
|
2010-10-15 02:36:49 +07:00
|
|
|
const unsigned long mips_io_port_base = -1;
|
2005-04-17 05:20:36 +07:00
|
|
|
EXPORT_SYMBOL(mips_io_port_base);
|
|
|
|
|
|
|
|
static struct resource code_resource = { .name = "Kernel code", };
|
|
|
|
static struct resource data_resource = { .name = "Kernel data", };
|
2017-10-13 02:50:34 +07:00
|
|
|
static struct resource bss_resource = { .name = "Kernel bss", };
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-04-13 18:15:47 +07:00
|
|
|
static void *detect_magic __initdata = detect_memory_region;
|
|
|
|
|
2018-07-28 08:23:20 +07:00
|
|
|
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
|
|
|
|
unsigned long ARCH_PFN_OFFSET;
|
|
|
|
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
|
|
|
|
#endif
|
|
|
|
|
2014-11-22 06:22:09 +07:00
|
|
|
/*
 * Record a physical memory region in the global boot_mem_map.
 *
 * Overlapping regions of the same @type are merged into a single entry;
 * regions that would overflow the physical address space are clipped or
 * rejected.  Silently drops the region (with a warning) when the map is
 * full or the parameters are invalid.
 */
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/*
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow
	 */
	if (start + size - 1 == PHYS_ADDR_MAX)
		--size;

	/* Sanity check: reject regions whose end wraps past zero. */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		/* Grow the existing entry to the union of both regions. */
		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	/* No mergeable entry found — append, if there is still room. */
	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}
|
|
|
|
|
2014-11-22 06:22:09 +07:00
|
|
|
/*
 * Probe the size of a RAM region by looking for address aliasing.
 *
 * Writes nothing: it compares the magic value stored at &detect_magic
 * against the same virtual address offset by each candidate size.  On
 * systems where RAM wraps (aliases) beyond the populated size, the copy
 * at (dm + size) matches once @size reaches the real amount of RAM.
 * Sizes are tried in powers of two from @sz_min up to (but excluding)
 * @sz_max; if no alias is found, @sz_max/2's doubling leaves size ==
 * the last value < sz_max tried... i.e. the loop exits with the first
 * aliasing size, or the first size >= sz_max.  The detected region is
 * then registered as BOOT_MEM_RAM.
 */
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
|
|
|
|
|
2018-01-03 01:52:21 +07:00
|
|
|
/*
 * Check whether [start, start + size) is usable: it must lie entirely
 * inside a single BOOT_MEM_RAM entry of boot_mem_map and must not
 * intersect any BOOT_MEM_RESERVED entry.
 */
static bool __init __maybe_unused memory_region_available(phys_addr_t start,
							  phys_addr_t size)
{
	bool covered_by_ram = false;
	bool clear_of_reserved = true;
	int idx;

	for (idx = 0; idx < boot_mem_map.nr_map; idx++) {
		phys_addr_t map_start = boot_mem_map.map[idx].addr;
		phys_addr_t map_end = map_start + boot_mem_map.map[idx].size;

		switch (boot_mem_map.map[idx].type) {
		case BOOT_MEM_RAM:
			/* Region must be wholly contained in one RAM entry. */
			if (start >= map_start && start + size <= map_end)
				covered_by_ram = true;
			break;
		case BOOT_MEM_RESERVED:
			/* Any intersection with a reserved entry disqualifies. */
			if ((start >= map_start && start < map_end) ||
			    (start < map_start && start + size >= map_start))
				clear_of_reserved = false;
			break;
		default:
			break;
		}
	}

	return covered_by_ram && clear_of_reserved;
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* Dump every boot_mem_map entry (size @ address, type) to the kernel log. */
static void __init print_memory_map(void)
{
	const int field = 2 * sizeof(unsigned long);
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}
|
|
|
|
|
2006-08-11 22:51:49 +07:00
|
|
|
/*
|
|
|
|
* Manage initrd
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
|
|
|
2006-08-11 22:51:53 +07:00
|
|
|
static int __init rd_start_early(char *p)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2006-08-11 22:51:53 +07:00
|
|
|
unsigned long start = memparse(p, &p);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2005-09-04 05:56:16 +07:00
|
|
|
#ifdef CONFIG_64BIT
|
2006-10-19 18:20:04 +07:00
|
|
|
/* Guess if the sign extension was forgotten by bootloader */
|
|
|
|
if (start < XKPHYS)
|
|
|
|
start = (int)start;
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif
|
2006-08-11 22:51:53 +07:00
|
|
|
initrd_start = start;
|
|
|
|
initrd_end += start;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("rd_start", rd_start_early);
|
|
|
|
|
|
|
|
static int __init rd_size_early(char *p)
|
|
|
|
{
|
|
|
|
initrd_end += memparse(p, &p);
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2006-08-11 22:51:53 +07:00
|
|
|
early_param("rd_size", rd_size_early);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-10-19 18:20:04 +07:00
|
|
|
/*
 * Validate and normalize the initrd address range.
 *
 * Returns the first free PFN after the initrd, or 0 when no (valid)
 * initrd is present — in which case initrd_start/initrd_end are zeroed.
 */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can now safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	/* A usable initrd implies a RAM-disk root device. */
	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);

disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}
|
|
|
|
|
2016-05-11 05:50:03 +07:00
|
|
|
/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped. Try to detect this and
 * byte swap it if needed.
 *
 * Only compiled in for Cavium Octeon; a no-op everywhere else.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature ("070701" = newc cpio magic). */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		/* Swap the whole image in 8-byte units, rounded up. */
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}
|
|
|
|
|
2006-08-11 22:51:49 +07:00
|
|
|
/*
 * Final initrd handling: verify the image fits in low memory, fix its
 * byte order if necessary, and reserve its pages with memblock so the
 * allocator will not hand them out.
 *
 * NOTE: the messages printed before "goto disable" deliberately omit
 * the trailing newline — the disable path appends " - disabling
 * initrd\n" via KERN_CONT.
 */
static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	/* The initrd may legitimately sit below the kernel image. */
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
|
|
|
|
|
|
|
|
#else /* !CONFIG_BLK_DEV_INITRD */
|
|
|
|
|
2006-10-13 17:22:52 +07:00
|
|
|
/* No initrd support configured: report no PFNs consumed by an initrd. */
static unsigned long __init init_initrd(void)
{
	return 0;
}
|
|
|
|
|
2006-08-11 22:51:49 +07:00
|
|
|
#define finalize_initrd() do {} while (0)
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2006-08-11 22:51:48 +07:00
|
|
|
/*
|
|
|
|
* Initialize the bootmem allocator. It also setup initrd related data
|
|
|
|
* if needed.
|
|
|
|
*/
|
2014-06-26 10:41:28 +07:00
|
|
|
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
|
2006-08-11 22:51:49 +07:00
|
|
|
|
2006-08-11 22:51:48 +07:00
|
|
|
static void __init bootmem_init(void)
{
	/*
	 * NOTE(review): on SGI IP27 and Loongson-3 NUMA configurations the
	 * generic memory-map walk below (the !IP27 variant) is skipped;
	 * presumably platform NUMA code sets up memory — confirm.  Only
	 * initrd validation and reservation happens here.
	 */
	init_initrd();
	finalize_initrd();
}
|
|
|
|
|
|
|
|
#else /* !CONFIG_SGI_IP27 */
|
|
|
|
|
|
|
|
/*
 * Generic bootmem/memblock initialization from boot_mem_map:
 *  - reserve everything from PHYS_OFFSET up to the kernel's _end,
 *  - compute min_low_pfn/max_low_pfn/max_pfn (clipping highmem),
 *  - register usable RAM with memblock and reserve unusable regions,
 *  - on relocatable kernels, return the gap below the moved image,
 *  - finally reserve the initrd, if any.
 */
static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	phys_addr_t ramstart = PHYS_ADDR_MAX;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/* Everything below the kernel image is off limits for now. */
	memblock_reserve(PHYS_OFFSET,
			 (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available
	 * and the lowest used RAM address
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		ramstart = min(ramstart, boot_mem_map.map[i].addr);

#ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
#endif

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		/*
		 * NOTE(review): the two trailing "continue"s below are
		 * no-ops at the end of the loop body — presumably leftovers
		 * from the pre-memblock bootmem placement logic; confirm
		 * before removing.
		 */
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET) {
		add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
				  BOOT_MEM_RESERVED);
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
	}

	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;
#endif

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

	/* Hand every usable RAM range to memblock. */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			if (start > min_low_pfn && end < max_low_pfn)
				memblock_reserve(boot_mem_map.map[i].addr,
						 boot_mem_map.map[i].size);

			continue;
		}

		/*
		 * We are rounding up the start address of usable memory
		 * and at the end of the usable range downwards.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		/*
		 * NOTE(review): "size" is computed but never used after the
		 * bootmem->memblock conversion — presumably dead; confirm.
		 */
		size = end - start;

		/* Register lowmem ranges */
		memory_present(0, start, end);
	}

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel
		 * But is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}
|
|
|
|
|
2006-08-11 22:51:49 +07:00
|
|
|
#endif /* CONFIG_SGI_IP27 */
|
|
|
|
|
2009-09-17 07:25:07 +07:00
|
|
|
static int usermem __initdata;
|
2006-08-11 22:51:53 +07:00
|
|
|
|
|
|
|
static int __init early_parse_mem(char *p)
|
|
|
|
{
|
2014-12-13 01:51:15 +07:00
|
|
|
phys_addr_t start, size;
|
2006-08-11 22:51:53 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If a user specifies memory size, we
|
|
|
|
* blow away any automatically generated
|
|
|
|
* size.
|
|
|
|
*/
|
|
|
|
if (usermem == 0) {
|
|
|
|
boot_mem_map.nr_map = 0;
|
|
|
|
usermem = 1;
|
2013-01-22 18:59:30 +07:00
|
|
|
}
|
2006-08-11 22:51:53 +07:00
|
|
|
start = 0;
|
|
|
|
size = memparse(p, &p);
|
|
|
|
if (*p == '@')
|
|
|
|
start = memparse(p + 1, &p);
|
|
|
|
|
|
|
|
add_memory_region(start, size, BOOT_MEM_RAM);
|
2016-11-23 20:43:49 +07:00
|
|
|
|
2006-08-11 22:51:53 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("mem", early_parse_mem);
|
2006-06-18 07:32:22 +07:00
|
|
|
|
2017-06-19 22:50:08 +07:00
|
|
|
static int __init early_parse_memmap(char *p)
|
|
|
|
{
|
|
|
|
char *oldp;
|
|
|
|
u64 start_at, mem_size;
|
|
|
|
|
|
|
|
if (!p)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!strncmp(p, "exactmap", 8)) {
|
|
|
|
pr_err("\"memmap=exactmap\" invalid on MIPS\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
oldp = p;
|
|
|
|
mem_size = memparse(p, &p);
|
|
|
|
if (p == oldp)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (*p == '@') {
|
|
|
|
start_at = memparse(p+1, &p);
|
|
|
|
add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
|
|
|
|
} else if (*p == '#') {
|
|
|
|
pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
|
|
|
|
return -EINVAL;
|
|
|
|
} else if (*p == '$') {
|
|
|
|
start_at = memparse(p+1, &p);
|
|
|
|
add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
|
|
|
|
} else {
|
|
|
|
pr_err("\"memmap\" invalid format!\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*p == '\0') {
|
|
|
|
usermem = 1;
|
|
|
|
return 0;
|
|
|
|
} else
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
early_param("memmap", early_parse_memmap);
|
|
|
|
|
2013-02-13 02:41:48 +07:00
|
|
|
#ifdef CONFIG_PROC_VMCORE
|
|
|
|
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
|
|
|
|
/*
 * Early "elfcorehdr=" parameter (kdump capture kernel): record the ELF
 * core header address and compute its reserved size as the remainder of
 * whichever boot_mem_map segment contains it.
 */
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment, that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
|
|
|
|
#endif
|
|
|
|
|
2014-11-22 06:22:09 +07:00
|
|
|
/*
 * Add [mem, end) to the boot memory map with the given @type, unless the
 * start address already falls inside an existing entry or the range is
 * empty.
 */
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t len = end - mem;
	int i;

	if (len == 0)
		return;

	/* Already covered by a known region?  Then nothing to add. */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		phys_addr_t r_start = boot_mem_map.map[i].addr;
		phys_addr_t r_end = r_start + boot_mem_map.map[i].size;

		if (mem >= r_start && mem < r_end)
			return;
	}

	add_memory_region(mem, len, type);
}
|
2011-11-22 21:38:03 +07:00
|
|
|
|
2013-09-05 00:56:24 +07:00
|
|
|
#ifdef CONFIG_KEXEC
/*
 * Total system memory in bytes, derived from the PFN range detected by
 * bootmem_init().  Used only to evaluate "crashkernel=X@Y" expressions
 * that depend on total memory size.
 */
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/*
 * Parse the "crashkernel=" argument and, if the requested region is
 * available, record it in crashk_res.  The actual memblock reservation
 * happens later in arch_mem_init().
 */
static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* No (or empty) crashkernel= argument: nothing to do. */
	if (ret != 0 || crash_size <= 0)
		return;

	if (!memory_region_available(crash_base, crash_size)) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

/*
 * Insert crashk_res under the given parent resource (called from
 * resource_init() for each RAM resource).  A start == end resource means
 * no crash kernel region was configured.
 */
static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
|
|
|
|
|
2015-10-12 18:13:02 +07:00
|
|
|
/*
 * Kconfig-selected policy for combining the bootloader (PROM/firmware),
 * device-tree, and built-in (CONFIG_CMDLINE) command lines; evaluated in
 * arch_mem_init() below.
 */
#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM \
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
|
2015-10-12 18:13:02 +07:00
|
|
|
|
2018-09-01 05:28:57 +07:00
|
|
|
/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but generic memory management system is still entirely uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator.  To keep old code from
 *	 breaking plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/*
	 * Initialize boot_command_line to an innocuous but non-empty string in
	 * order to prevent early_init_dt_scan_chosen() from copying
	 * CONFIG_CMDLINE into it without our knowledge. We handle
	 * CONFIG_CMDLINE ourselves below & don't want to duplicate its
	 * content because repeating arguments can be problematic.
	 */
	strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);

	/* call board setup routine */
	plat_mem_setup();
	memblock_set_bottom_up(true);

	/*
	 * Make sure all kernel memory is in the maps.  The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

	/*
	 * Assemble boot_command_line from the firmware (arcs_cmdline),
	 * device-tree and built-in command lines, per the policy macros
	 * defined above.
	 */
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	/* usermem is set by the "mem="/"memmap=" early params. */
	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	/* Reserve the FDT blob and its /reserved-memory regions. */
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up
	 * to this point is possible to only reserve physical memory
	 * with memblock_reserve; memblock_alloc* can be used
	 * only after this point
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

#ifdef CONFIG_PROC_VMCORE
	/* Reserve the kdump ELF core header parsed from "elfcorehdr=". */
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		memblock_reserve(crashk_res.start,
				 crashk_res.end - crashk_res.start + 1);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			memblock_reserve(reg->base, reg->size);

	/* Reserve for hibernation */
	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			       __pa_symbol(&__nosave_end));
}
|
|
|
|
|
2006-08-11 22:51:51 +07:00
|
|
|
/*
 * resource_init - register low-memory RAM regions with the iomem resource
 * tree and nest the kernel code/data/bss and crashkernel resources inside
 * them.
 *
 * Only regions below HIGHMEM_START are registered; regions straddling the
 * boundary are clamped to it.
 */
static void __init resource_init(void)
{
	int i;

	/* SGI IP27 (UNCAC_BASE != IO_BASE) handles resources elsewhere. */
	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		/*
		 * memblock_alloc() can return NULL; dereferencing it
		 * unchecked would crash early boot with no diagnostic.
		 */
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}
|
|
|
|
|
2014-06-26 10:41:25 +07:00
|
|
|
#ifdef CONFIG_SMP
/*
 * prefill_possible_map - mark CPUs [0, possible) as possible and clamp
 * nr_cpu_ids accordingly.  All remaining CPU ids up to NR_CPUS are marked
 * not possible.
 */
static void __init prefill_possible_map(void)
{
	int cpu, limit;

	limit = num_possible_cpus();
	if (limit > nr_cpu_ids)
		limit = nr_cpu_ids;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		set_cpu_possible(cpu, cpu < limit);

	nr_cpu_ids = limit;
}
#else
static inline void prefill_possible_map(void) {}
#endif
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * setup_arch - top-level MIPS architecture initialization, called from
 * start_kernel().  Sequence: CPU/CM probe, firmware init, early consoles,
 * console switch selection, memory init, resource registration, SMP setup
 * and cache/paging init.  The ordering is load-bearing; see arch_mem_init()
 * above for the memory-side constraints.
 */
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}
|
|
|
|
|
2007-01-23 23:21:05 +07:00
|
|
|
/* Per-CPU kernel stack pointers, referenced from low-level entry code. */
unsigned long kernelsp[NR_CPUS];
/* Raw firmware/bootloader arguments saved at entry (a0-a3). */
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
/* Address of the device-tree blob passed in by firmware, if any. */
unsigned long fw_passed_dtb;
#endif
|
|
|
|
|
2007-06-29 22:55:48 +07:00
|
|
|
#ifdef CONFIG_DEBUG_FS
|
|
|
|
struct dentry *mips_debugfs_dir;
|
|
|
|
static int __init debugfs_mips(void)
|
|
|
|
{
|
2019-01-22 21:57:42 +07:00
|
|
|
mips_debugfs_dir = debugfs_create_dir("mips", NULL);
|
2007-06-29 22:55:48 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
arch_initcall(debugfs_mips);
|
|
|
|
#endif
|
2018-06-15 18:08:45 +07:00
|
|
|
|
2018-08-16 20:47:53 +07:00
|
|
|
#ifdef CONFIG_DMA_MAYBE_COHERENT
|
2018-06-15 18:08:45 +07:00
|
|
|
/* User defined DMA coherency from command line. */
|
|
|
|
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
|
|
|
|
EXPORT_SYMBOL_GPL(coherentio);
|
|
|
|
int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
|
|
|
|
|
|
|
|
static int __init setcoherentio(char *str)
|
|
|
|
{
|
|
|
|
coherentio = IO_COHERENCE_ENABLED;
|
|
|
|
pr_info("Hardware DMA cache coherency (command line)\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("coherentio", setcoherentio);
|
|
|
|
|
|
|
|
static int __init setnocoherentio(char *str)
|
|
|
|
{
|
|
|
|
coherentio = IO_COHERENCE_DISABLED;
|
|
|
|
pr_info("Software DMA cache coherency (command line)\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("nocoherentio", setnocoherentio);
|
|
|
|
#endif
|