x86, mm: Find_early_table_space based on ranges that are actually being mapped
Current logic finds enough space for direct mapping page tables from 0
to end. Instead, we only need to find enough space to cover mr[0].start
to mr[nr_range - 1].end -- the range that is actually being mapped by
init_memory_mapping().

This is needed after commit 1bbbbe779a, to address the panic reported here:

  https://lkml.org/lkml/2012/10/20/160
  https://lkml.org/lkml/2012/10/21/157
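
To see why sizing from the actual ranges matters, here is a minimal
user-space sketch (not kernel code; the shift constant matches x86-64
with 4K pages, and the memory layout is hypothetical) contrasting the
old 0-to-end sizing with the new per-range sizing:

/* Minimal user-space sketch -- not kernel code. Shows how sizing from
 * 0..end over-counts PUD entries when the mapped ranges leave a large
 * hole, which is the over-reservation this patch removes. */
#include <stdio.h>

#define PUD_SHIFT 30                    /* 1GB per PUD entry */
#define PUD_SIZE  (1UL << PUD_SHIFT)

struct map_range { unsigned long start, end; };

/* Old logic: assume everything from 0 to end gets mapped. */
static unsigned long puds_old(unsigned long end)
{
        return (end + PUD_SIZE - 1) >> PUD_SHIFT;
}

/* New logic: count only what the ranges actually cover. */
static unsigned long puds_new(const struct map_range *mr, int nr_range)
{
        unsigned long puds = 0;
        int i;

        for (i = 0; i < nr_range; i++)
                puds += (mr[i].end - mr[i].start + PUD_SIZE - 1) >> PUD_SHIFT;
        return puds;
}

int main(void)
{
        /* Hypothetical layout: 4GB low, another 4GB above a hole at 1TB. */
        struct map_range mr[] = {
                { 0x0UL,           0x100000000UL },
                { 0x10000000000UL, 0x10100000000UL },
        };

        printf("old sizing: %lu PUD entries\n", puds_old(mr[1].end)); /* 1028 */
        printf("new sizing: %lu PUD entries\n", puds_new(mr, 2));     /*    8 */
        return 0;
}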
Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Link: http://lkml.kernel.org/r/20121024195311.GB11779@jshin-Toonie
Tested-by: Tom Rini <trini@ti.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 1f2ff682ac
commit 844ab6f993
arch/x86/mm/init.c
@@ -29,36 +29,54 @@ int direct_gbpages
 #endif
 ;
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-                                          int use_gbpages)
+struct map_range {
+        unsigned long start;
+        unsigned long end;
+        unsigned page_size_mask;
+};
+
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-        unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+        int i;
+        unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+        unsigned long start = 0, good_end;
         phys_addr_t base;
 
-        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+        for (i = 0; i < nr_range; i++) {
+                unsigned long range, extra;
 
-        if (use_gbpages) {
-                unsigned long extra;
+                range = mr[i].end - mr[i].start;
+                puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
 
-                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-        } else
-                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+                        extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+                        pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+                } else {
+                        pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+                }
 
-        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-        if (use_pse) {
-                unsigned long extra;
-
-                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+                if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+                        extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
 #ifdef CONFIG_X86_32
-                extra += PMD_SIZE;
+                        extra += PMD_SIZE;
 #endif
-                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        } else
-                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                        /* The first 2/4M doesn't use large pages. */
+                        if (mr[i].start < PMD_SIZE)
+                                extra += range;
 
+                        ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                } else {
+                        ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                }
+        }
+
+        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
         tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 #ifdef CONFIG_X86_32
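
One detail worth noting in the hunk above: range - ((range >> PUD_SHIFT)
<< PUD_SHIFT) is simply range % PUD_SIZE -- the tail of the range that
1G pages cannot cover and that still needs PMD entries (and likewise at
the PMD level for the PTE count). A tiny standalone check of that
equivalence, with hypothetical values:

#include <assert.h>

#define PUD_SHIFT 30

int main(void)
{
        unsigned long range = (5UL << PUD_SHIFT) + 0x123000; /* 5GB + a bit */
        unsigned long extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);

        /* Shift-down/shift-up truncation equals remainder modulo 1GB. */
        assert(extra == range % (1UL << PUD_SHIFT));
        return 0;
}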
@@ -76,7 +94,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
         pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
         printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-                end - 1, pgt_buf_start << PAGE_SHIFT,
+                mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
                 (pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
@@ -85,12 +103,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
         memblock_reserve(start, end - start);
 }
 
-struct map_range {
-        unsigned long start;
-        unsigned long end;
-        unsigned page_size_mask;
-};
-
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -263,7 +275,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
          * nodes are discovered.
          */
         if (!after_bootmem)
-                find_early_table_space(end, use_pse, use_gbpages);
+                find_early_table_space(mr, nr_range);
 
         for (i = 0; i < nr_range; i++)
                 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
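
The call-site change above is the heart of the interface change: the
same mr[] array that drives the mapping loop now also drives the table
sizing, so the two steps can no longer disagree about which addresses
are covered. A self-contained user-space toy of that shape (the
kernel-sounding names are stand-ins, not the real implementations):

#include <stdio.h>

struct map_range { unsigned long start, end; unsigned page_size_mask; };

/* Stand-in for find_early_table_space(mr, nr_range): sizes from mr[]. */
static void size_tables(const struct map_range *mr, int nr_range)
{
        unsigned long total = 0;
        int i;

        for (i = 0; i < nr_range; i++)
                total += mr[i].end - mr[i].start;
        printf("sizing page tables for %lu bytes in %d range(s)\n",
               total, nr_range);
}

/* Stand-in for kernel_physical_mapping_init(). */
static void map_one_range(unsigned long start, unsigned long end)
{
        printf("mapping [%#lx-%#lx)\n", start, end);
}

int main(void)
{
        struct map_range mr[] = {
                { 0x000000, 0x100000, 0 },
                { 0x200000, 0x400000, 0 },
        };
        int i, nr_range = 2;

        size_tables(mr, nr_range);      /* was sized from 0..end before the patch */
        for (i = 0; i < nr_range; i++)
                map_one_range(mr[i].start, mr[i].end);
        return 0;
}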