mm/sparse: cleanup the code surrounding memory_present()

After removal of CONFIG_HAVE_MEMBLOCK_NODE_MAP we have two equivalent
functions that call memory_present() for each region in memblock.memory:
sparse_memory_present_with_active_regions() and memblocks_present().

Moreover, all architectures have a call to either of these functions
preceding the call to sparse_init() and in most cases they are called
one after the other.

Mark the regions from memblock.memory as present during sparse_init() by
making sparse_init() call memblocks_present(), make memblocks_present()
and memory_present() functions static and remove redundant
sparse_memory_present_with_active_regions() function.

Also remove no longer required HAVE_MEMORY_PRESENT configuration option.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20200712083130.22919-1-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Mike Rapoport 2020-08-06 23:24:02 -07:00 committed by Linus Torvalds
parent 6cda72047e
commit c89ab04feb
23 changed files with 19 additions and 101 deletions

View File

@ -141,11 +141,8 @@ sections:
`mem_section` objects and the number of rows is calculated to fit `mem_section` objects and the number of rows is calculated to fit
all the memory sections. all the memory sections.
The architecture setup code should call :c:func:`memory_present` for The architecture setup code should call sparse_init() to
each active memory range or use :c:func:`memblocks_present` or initialize the memory sections and the memory maps.
:c:func:`sparse_memory_present_with_active_regions` wrappers to
initialize the memory sections. Next, the actual memory maps should be
set up using :c:func:`sparse_init`.
With SPARSEMEM there are two possible ways to convert a PFN to the With SPARSEMEM there are two possible ways to convert a PFN to the
corresponding `struct page` - a "classic sparse" and "sparse corresponding `struct page` - a "classic sparse" and "sparse

View File

@ -243,13 +243,8 @@ void __init bootmem_init(void)
(phys_addr_t)max_low_pfn << PAGE_SHIFT); (phys_addr_t)max_low_pfn << PAGE_SHIFT);
/* /*
* Sparsemem tries to allocate bootmem in memory_present(), * sparse_init() tries to allocate memory from memblock, so must be
* so must be done after the fixed reservations * done after the fixed reservations
*/
memblocks_present();
/*
* sparse_init() needs the bootmem allocator up and running.
*/ */
sparse_init(); sparse_init();

View File

@ -430,11 +430,9 @@ void __init bootmem_init(void)
#endif #endif
/* /*
* Sparsemem tries to allocate bootmem in memory_present(), so must be * sparse_init() tries to allocate memory from memblock, so must be
* done after the fixed reservations. * done after the fixed reservations
*/ */
memblocks_present();
sparse_init(); sparse_init();
zone_sizes_init(min, max); zone_sizes_init(min, max);

View File

@ -600,7 +600,6 @@ void __init paging_init(void)
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init(); sparse_init();
#ifdef CONFIG_VIRTUAL_MEM_MAP #ifdef CONFIG_VIRTUAL_MEM_MAP

View File

@ -172,9 +172,6 @@ void __init setup_memory(void)
&memblock.memory, 0); &memblock.memory, 0);
} }
/* XXX need to clip this if using highmem? */
sparse_memory_present_with_active_regions(0);
paging_init(); paging_init();
} }

View File

@ -371,14 +371,6 @@ static void __init bootmem_init(void)
#endif #endif
} }
/*
* In any case the added to the memblock memory regions
* (highmem/lowmem, available/reserved, etc) are considered
* as present, so inform sparsemem about them.
*/
memblocks_present();
/* /*
* Reserve initrd memory if needed. * Reserve initrd memory if needed.
*/ */

View File

@ -220,7 +220,6 @@ static __init void prom_meminit(void)
cpumask_clear(&__node_cpumask[node]); cpumask_clear(&__node_cpumask[node]);
} }
} }
memblocks_present();
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {

View File

@ -402,8 +402,6 @@ void __init prom_meminit(void)
} }
__node_data[node] = &null_node; __node_data[node] = &null_node;
} }
memblocks_present();
} }
void __init prom_free_prom_memory(void) void __init prom_free_prom_memory(void)

View File

@ -689,11 +689,6 @@ void __init paging_init(void)
flush_cache_all_local(); /* start with known state */ flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL); flush_tlb_all_local(NULL);
/*
* Mark all memblocks as present for sparsemem using
* memory_present() and then initialize sparsemem.
*/
memblocks_present();
sparse_init(); sparse_init();
parisc_bootmem_free(); parisc_bootmem_free();
} }

View File

@ -183,8 +183,6 @@ void __init mem_topology_setup(void)
void __init initmem_init(void) void __init initmem_init(void)
{ {
/* XXX need to clip this if using highmem? */
sparse_memory_present_with_active_regions(0);
sparse_init(); sparse_init();
} }

View File

@ -949,7 +949,6 @@ void __init initmem_init(void)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
setup_node_data(nid, start_pfn, end_pfn); setup_node_data(nid, start_pfn, end_pfn);
sparse_memory_present_with_active_regions(nid);
} }
sparse_init(); sparse_init();

View File

@ -544,7 +544,6 @@ void mark_rodata_ro(void)
void __init paging_init(void) void __init paging_init(void)
{ {
setup_vm_final(); setup_vm_final();
memblocks_present();
sparse_init(); sparse_init();
setup_zero_page(); setup_zero_page();
zone_sizes_init(); zone_sizes_init();

View File

@ -115,7 +115,6 @@ void __init paging_init(void)
__load_psw_mask(psw.mask); __load_psw_mask(psw.mask);
kasan_free_early_identity(); kasan_free_early_identity();
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init(); sparse_init();
zone_dma_bits = 31; zone_dma_bits = 31;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

View File

@ -241,12 +241,6 @@ static void __init do_init_bootmem(void)
plat_mem_setup(); plat_mem_setup();
for_each_memblock(memory, reg) {
int nid = memblock_get_region_node(reg);
memory_present(nid, memblock_region_memory_base_pfn(reg),
memblock_region_memory_end_pfn(reg));
}
sparse_init(); sparse_init();
} }

View File

@ -53,7 +53,4 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* It's up */ /* It's up */
node_set_online(nid); node_set_online(nid);
/* Kick sparsemem */
sparse_memory_present_with_active_regions(nid);
} }

View File

@ -1610,7 +1610,6 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
/* XXX cpu notifier XXX */ /* XXX cpu notifier XXX */
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init(); sparse_init();
return end_pfn; return end_pfn;

View File

@ -678,7 +678,6 @@ void __init initmem_init(void)
#endif #endif
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_FLATMEM #ifdef CONFIG_FLATMEM
max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn; max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
@ -718,7 +717,6 @@ void __init paging_init(void)
* NOTE: at this point the bootmem allocator is fully available. * NOTE: at this point the bootmem allocator is fully available.
*/ */
olpc_dt_build_devicetree(); olpc_dt_build_devicetree();
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init(); sparse_init();
zone_sizes_init(); zone_sizes_init();
} }

View File

@ -817,7 +817,6 @@ void __init initmem_init(void)
void __init paging_init(void) void __init paging_init(void)
{ {
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init(); sparse_init();
/* /*

View File

@ -2382,9 +2382,6 @@ static inline unsigned long get_num_physpages(void)
* for_each_valid_physical_page_range() * for_each_valid_physical_page_range()
* memblock_add_node(base, size, nid) * memblock_add_node(base, size, nid)
* free_area_init(max_zone_pfns); * free_area_init(max_zone_pfns);
*
* sparse_memory_present_with_active_regions() calls memory_present() for
* each range when SPARSEMEM is enabled.
*/ */
void free_area_init(unsigned long *max_zone_pfn); void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void); unsigned long node_map_pfn_alignment(void);
@ -2395,7 +2392,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
extern void get_pfn_range_for_nid(unsigned int nid, extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn); unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void); extern unsigned long find_min_pfn_with_active_regions(void);
extern void sparse_memory_present_with_active_regions(int nid);
#ifndef CONFIG_NEED_MULTIPLE_NODES #ifndef CONFIG_NEED_MULTIPLE_NODES
static inline int early_pfn_to_nid(unsigned long pfn) static inline int early_pfn_to_nid(unsigned long pfn)

View File

@ -839,18 +839,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif
#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif
#ifdef CONFIG_HAVE_MEMORYLESS_NODES #ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id); int local_memory_node(int node_id);
#else #else
@ -1407,8 +1395,6 @@ struct mminit_pfnnid_cache {
#define early_pfn_valid(pfn) (1) #define early_pfn_valid(pfn) (1)
#endif #endif
void memory_present(int nid, unsigned long start, unsigned long end);
/* /*
* If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
* need to check pfn validity within that MAX_ORDER_NR_PAGES block. * need to check pfn validity within that MAX_ORDER_NR_PAGES block.

View File

@ -88,13 +88,9 @@ config NEED_MULTIPLE_NODES
def_bool y def_bool y
depends on DISCONTIGMEM || NUMA depends on DISCONTIGMEM || NUMA
config HAVE_MEMORY_PRESENT
def_bool y
depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM
# #
# SPARSEMEM_EXTREME (which is the default) does some bootmem # SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when memory_present() is called. If this cannot # allocations when sparse_init() is called. If this cannot
# be done on your architecture, select this option. However, # be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially # statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful. # consume vast quantities of .bss, so be careful.

View File

@ -6324,22 +6324,6 @@ void __meminit init_currently_empty_zone(struct zone *zone,
zone->initialized = 1; zone->initialized = 1;
} }
/**
* sparse_memory_present_with_active_regions - Call memory_present for each active range
* @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
*
* If an architecture guarantees that all ranges registered contain no holes and may
* be freed, this function may be used instead of calling memory_present() manually.
*/
void __init sparse_memory_present_with_active_regions(int nid)
{
unsigned long start_pfn, end_pfn;
int i, this_nid;
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
memory_present(this_nid, start_pfn, end_pfn);
}
/** /**
* get_pfn_range_for_nid - Return the start and end page frames for a node * get_pfn_range_for_nid - Return the start and end page frames for a node
* @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.

View File

@ -249,7 +249,7 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
#endif #endif
/* Record a memory area against a node. */ /* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end) static void __init memory_present(int nid, unsigned long start, unsigned long end)
{ {
unsigned long pfn; unsigned long pfn;
@ -285,11 +285,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
} }
/* /*
* Mark all memblocks as present using memory_present(). This is a * Mark all memblocks as present using memory_present().
* convenience function that is useful for a number of arches * This is a convenience function that is useful to mark all of the systems
* to mark all of the systems memory as present during initialization. * memory as present during initialization.
*/ */
void __init memblocks_present(void) static void __init memblocks_present(void)
{ {
struct memblock_region *reg; struct memblock_region *reg;
@ -574,9 +574,13 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
*/ */
void __init sparse_init(void) void __init sparse_init(void)
{ {
unsigned long pnum_begin = first_present_section_nr(); unsigned long pnum_end, pnum_begin, map_count = 1;
int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin)); int nid_begin;
unsigned long pnum_end, map_count = 1;
memblocks_present();
pnum_begin = first_present_section_nr();
nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */ /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
set_pageblock_order(); set_pageblock_order();