8a7f97b902
Add a check for the return value of the memblock_alloc*() functions and
call panic() in case of error. The panic message repeats the one used by
the panicking memblock allocators, with its parameters adjusted to
include only the relevant ones.

The replacement was mostly automated with semantic patches like the one
below, with manual massaging of format strings:

	@@
	expression ptr, size, align;
	@@
	ptr = memblock_alloc(size, align);
	+ if (!ptr)
	+	panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
	+	      __func__, size, align);

[anders.roxell@linaro.org: use '%pa' with 'phys_addr_t' type]
  Link: http://lkml.kernel.org/r/20190131161046.21886-1-anders.roxell@linaro.org
[rppt@linux.ibm.com: fix format strings for panics after memblock_alloc]
  Link: http://lkml.kernel.org/r/1548950940-15145-1-git-send-email-rppt@linux.ibm.com
[rppt@linux.ibm.com: don't panic if the allocation in sparse_buffer_init fails]
  Link: http://lkml.kernel.org/r/20190131074018.GD28876@rapoport-lnx
[akpm@linux-foundation.org: fix xtensa printk warning]
Link: http://lkml.kernel.org/r/1548057848-15136-20-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
Reviewed-by: Guo Ren <ren_guo@c-sky.com> [c-sky]
Acked-by: Paul Burton <paul.burton@mips.com> [MIPS]
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> [s390]
Reviewed-by: Juergen Gross <jgross@suse.com> [Xen]
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Max Filippov <jcmvbkbc@gmail.com> [xtensa]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
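
The transformation this produces at a typical call site, as a minimal C sketch (generic names, not taken from any one call site):

	void *ptr = memblock_alloc(size, align);
	if (!ptr)	/* early-boot allocation failed: nothing left to do but panic */
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, align);

The two memblock_alloc() calls in paging_init() below are concrete instances of this pattern.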
// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

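/*
 * The globals above implement a small ASID allocator: context_map is a
 * bitmap of contexts currently handed out, context_mm maps a context
 * number back to the mm_struct using it, nr_free_contexts counts the
 * contexts still available, and next_mmu_context is the next candidate
 * to hand out (or to steal, see steal_context() below). They are set
 * up by cf_mmu_context_init().
 */
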
/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}

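/*
 * Handle a TLB miss: walk the page tables for the faulting address and,
 * when a usable mapping is found, load it into the hardware TLB through
 * the MMUTR/MMUDR registers. For a DTLB miss the faulting address is
 * read back from the MMUAR register; for an ITLB miss it is rebuilt
 * from regs->pc plus the extension-word offset. Returns 0 on success
 * and -1 when no usable mapping exists, so the caller can raise the
 * appropriate fault.
 */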
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}

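/*
 * Register the single RAM bank [_rambase, _ramend) with memblock and
 * reserve the memory already occupied by the kernel image (everything
 * below the page-aligned _ramstart). Also derives num_pages and the
 * pfn bounds that paging_init() uses later.
 */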
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP. If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}
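
/*
 * For orientation: a minimal sketch of how a context allocator built on
 * the pieces above could hand a context to an mm. Illustrative only;
 * the real allocator lives in <asm/mmu_context.h>, following the ppc
 * code credited above, and the helper name here is hypothetical:
 *
 *	static void example_get_context(struct mm_struct *mm)
 *	{
 *		while (atomic_dec_if_positive(&nr_free_contexts) < 0)
 *			steal_context();  // assumes destroy_context() refills the pool
 *		while (test_and_set_bit(next_mmu_context, context_map))
 *			next_mmu_context = (next_mmu_context + 1) % (LAST_CONTEXT + 1);
 *		mm->context = next_mmu_context;
 *		context_mm[next_mmu_context] = mm;
 *	}
 */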