commit a4a3ede213
mm: zero reserved and unavailable struct pages

Some memory is reserved but unavailable: it is not present in memblock.memory (because it is not backed by physical pages), but it is present in memblock.reserved. Such memory has backing struct pages, but they are not initialized by going through __init_single_page().

In some cases these struct pages are accessed even though they do not contain any data. One example: page_to_pfn() might access page->flags if that is where section information is stored (CONFIG_SPARSEMEM, SECTION_IN_PAGE_FLAGS).

One example of such memory: trim_low_memory_range() unconditionally reserves from pfn 0, but e820__memblock_setup() might provide the existing memory from pfn 1 (i.e. KVM).

Since struct pages are zeroed in __init_single_page(), and not at allocation time, we must zero such struct pages explicitly.

The patch adds a new memblock iterator:

  for_each_resv_unavail_range(i, p_start, p_end)

which iterates through the reserved && !memory lists, and we zero the struct pages explicitly by calling mm_zero_struct_page().

===

Here is a more detailed example of the problem that this patch addresses. The run was tested on qemu with the following arguments:

  -enable-kvm -cpu kvm64 -m 512 -smp 2

This patch reports that there are 98 unavailable pages. They are: pfn 0 and pfns in the range [159, 255]. Note that trim_low_memory_range() reserves only pfns in the range [0, 15]; it does not reserve the [159, 255] ones.

e820__memblock_setup() reports to Linux that the following physical ranges are available:

    [1  , 158]
    [256, 130783]

Notice that exactly the unavailable pfns are missing!

Now, let's check what we have in zone 0: [1, 131039]

pfn 0 is not part of the zone, but pfns [1, 158] are.

However, the bigger problem if we do not initialize these struct pages is with memory hotplug, because that path operates at 2M boundaries (section_nr) and checks whether a 2M range of pages is hot-removable. It starts with the first pfn from the zone, rounds it down to a 2M boundary (struct pages are allocated at 2M boundaries when vmemmap is created), and checks whether that section is hot-removable. In this case it starts with pfn 1 and rounds it down to pfn 0. Later the pfn is converted to a struct page, and some fields are checked. Now, if we do not zero struct pages, we get unpredictable results.

In fact, when CONFIG_DEBUG_VM is enabled, and we explicitly set all vmemmap memory to ones, the following panic is observed with a kernel test without this patch applied:

  BUG: unable to handle kernel NULL pointer dereference at (null)
  IP: is_pageblock_removable_nolock+0x35/0x90
  PGD 0 P4D 0
  Oops: 0000 [#1] PREEMPT
  ...
  task: ffff88001f4e2900 task.stack: ffffc90000314000
  RIP: 0010:is_pageblock_removable_nolock+0x35/0x90
  Call Trace:
   ? is_mem_section_removable+0x5a/0xd0
   show_mem_removable+0x6b/0xa0
   dev_attr_show+0x1b/0x50
   sysfs_kf_seq_show+0xa1/0x100
   kernfs_seq_show+0x22/0x30
   seq_read+0x1ac/0x3a0
   kernfs_fop_read+0x36/0x190
   ? security_file_permission+0x90/0xb0
   __vfs_read+0x16/0x30
   vfs_read+0x81/0x130
   SyS_read+0x44/0xa0
   entry_SYSCALL_64_fastpath+0x1f/0xbd

Link: http://lkml.kernel.org/r/20171013173214.27300-7-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Tested-by: Bob Picco <bob.picco@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
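For context, here is a minimal sketch (not the verbatim patch hunk) of how the new iterator can be used to zero the unavailable struct pages, following the approach the message describes. The function name zero_unavail_struct_pages() is illustrative; for_each_resv_unavail_range() and mm_zero_struct_page() are the names given above:

static void __init zero_unavail_struct_pages(void)
{
        phys_addr_t start, end;
        unsigned long pfn;
        u64 i;

        /* Ranges in memblock.reserved but absent from memblock.memory. */
        for_each_resv_unavail_range(i, &start, &end) {
                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++)
                        mm_zero_struct_page(pfn_to_page(pfn));
        }
}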
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
	char *name;
};

struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
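/*
 * Example (illustrative, not part of the original header): typical
 * early-boot usage of the interfaces above. Register a bank of RAM,
 * then carve out a firmware region so the allocator never hands it
 * out (addresses and sizes are made up):
 *
 *	memblock_add(0x80000000, SZ_256M);	// RAM discovered from DT/e820
 *	memblock_reserve(0x80000000, SZ_1M);	// firmware scratch area
 */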

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
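/*
 * Example (illustrative): with a NULL @type_b the iterator walks every
 * region of @type_a. The sketch below prints all memory regions on
 * node 0:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, NULL, 0, MEMBLOCK_NONE,
 *			   &start, &end, NULL)
 *		pr_info("node0 range: %pa..%pa\n", &start, &end);
 */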

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
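/*
 * Example (illustrative): dump every reserved range. Because the
 * iterator only consults memblock.reserved, it works before any zone
 * or node information has been set up:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */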

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
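/*
 * Example (illustrative): for_each_mem_pfn_range() reports pfn ranges
 * together with the node they belong to, which is useful when
 * initializing per-node data:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("pfns %lu..%lu on node %d\n", start_pfn, end_pfn, nid);
 */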

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
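/*
 * Example (illustrative): scan free (memory && !reserved) areas for a
 * candidate range, e.g. a simple first-fit search inside a helper
 * function:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		if (end - start >= SZ_16M)
 *			return start;	// first free range large enough
 */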

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

/**
 * for_each_resv_unavail_range - iterate through reserved and unavailable memory
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
 * Available as soon as memblock is initialized.
 * Note: because this memory does not belong to any physical node, flags and
 * nid arguments do not make sense and thus are not exposed as arguments.
 */
#define for_each_resv_unavail_range(i, p_start, p_end)			\
	for_each_mem_range(i, &memblock.reserved, &memblock.memory,	\
			   NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
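/*
 * Example (illustrative): allocate a page-aligned, physically
 * contiguous buffer before the buddy allocator is up. The phys_addr_t
 * result must be converted before the kernel can touch it:
 *
 *	phys_addr_t phys = memblock_alloc(SZ_64K, PAGE_SIZE);
 *	void *buf = phys_to_virt(phys);
 */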

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this is true, memblock allocates memory in the bottom-up
 * direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
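/*
 * Example (illustrative): for a reserved region [0x1234, 0x5678) with
 * 4K pages, the accessors above round conservatively outwards:
 *
 *	memblock_region_reserved_base_pfn() -> PFN_DOWN(0x1234) = 1
 *	memblock_region_reserved_end_pfn()  -> PFN_UP(0x5678)   = 6
 *
 * i.e. every byte of the region falls inside pfns [1, 6), whereas the
 * memory accessors round inwards (PFN_UP of base, PFN_DOWN of end) so
 * that only fully covered pages are reported.
 */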

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
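/*
 * Example (illustrative): for_each_memblock() takes the bare field name
 * ("memory" or "reserved"), not a pointer, because the macro pastes it
 * into memblock.memblock_type:
 *
 *	struct memblock_region *r;
 *
 *	for_each_memblock(memory, r)
 *		pr_info("base %pa size %pa\n", &r->base, &r->size);
 */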

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
		phys_addr_t end_addr)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */