1d931264af
Originally, the only early reserved range that overlapped with high pages was "KVA RAM", and we already remove that from the active ranges. However, it turns out Xen can have that kind of overlap to support memory ballooning. So we need to make add_highpage_with_active_regions() subtract the memblock reserved ranges, just like we do for low RAM; this is the proper design anyway.
In this patch, refactor get_free_all_memory_range() so it can be used by add_highpage_with_active_regions(). With that, we also no longer need to remove "KVA RAM" from the active ranges.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CABB183.1040607@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
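As a hedged illustration of the refactoring described above, the sketch below shows how the highmem path can reuse __get_free_all_memory_range() so that memblock-reserved pages are skipped just like low RAM. This is a minimal sketch in kernel style, not the literal patch; the plural function name add_highpages_with_active_regions() and the helper add_one_highpage_init() are assumed from arch/x86/mm/init_32.c.

/*
 * Sketch: register only the free (i.e. not memblock-reserved) high PFNs.
 * __get_free_all_memory_range() returns the free ranges clipped to
 * [start_pfn, end_pfn), so reserved regions never get handed to the
 * page allocator here.
 */
void __init add_highpages_with_active_regions(int nid,
			unsigned long start_pfn, unsigned long end_pfn)
{
	struct range *range;
	int nr_range;
	int i;
	unsigned long pfn;

	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);

	for (i = 0; i < nr_range; i++)
		for (pfn = range[i].start; pfn < range[i].end; pfn++)
			add_one_highpage_init(pfn_to_page(pfn), pfn);
}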
24 lines
882 B
C
#ifndef _X86_MEMBLOCK_H
#define _X86_MEMBLOCK_H

#define ARCH_DISCARD_MEMBLOCK

u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
void memblock_x86_to_bootmem(u64 start, u64 end);

void memblock_x86_reserve_range(u64 start, u64 end, char *name);
void memblock_x86_free_range(u64 start, u64 end);
struct range;
int __get_free_all_memory_range(struct range **range, int nodeid,
			 unsigned long start_pfn, unsigned long end_pfn);
int get_free_all_memory_range(struct range **rangep, int nodeid);

void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn);
u64 memblock_x86_hole_size(u64 start, u64 end);
u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
u64 memblock_x86_memory_in_range(u64 addr, u64 limit);

#endif
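For context, a hypothetical early-boot caller of the interfaces declared in this header might look like the sketch below. The function name example_reserve_and_walk() and the reserved address range are made up purely for illustration; only the declarations above are taken from the header.

/* Reserve a made-up firmware window, then walk the remaining free ranges. */
static void __init example_reserve_and_walk(void)
{
	struct range *range;
	int nr_range, i;

	/* Mark a hypothetical firmware area reserved so it is never handed out. */
	memblock_x86_reserve_range(0x000a0000, 0x00100000, "EXAMPLE FW");

	/* Collect the free ranges for node 0; reserved areas are excluded. */
	nr_range = get_free_all_memory_range(&range, 0);

	for (i = 0; i < nr_range; i++)
		pr_info("free range: [%llx, %llx)\n",
			(u64)range[i].start, (u64)range[i].end);
}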