commit e05c7b1f2b

The powerpc 32-bit implementation of pgtable has nice shortcuts for
accessing kernel PMD and PTE for a given virtual address. Make these
helpers available for all architectures.

[rppt@linux.ibm.com: microblaze: fix page table traversal in setup_rt_frame()]
  Link: http://lkml.kernel.org/r/20200518191511.GD1118872@kernel.org
[akpm@linux-foundation.org: s/pmd_ptr_k/pmd_off_k/ in various powerpc places]

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-9-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
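For context, the shortcuts this commit refers to collapse the full pgd/p4d/pud walk into a single call. A minimal sketch of the helpers, roughly as the series adds them to include/linux/pgtable.h (the exact upstream definitions may differ in detail):

	/* walk pgd -> p4d -> pud to reach the PMD entry covering va in mm */
	static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
	{
		return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
	}

	/* same walk, but against the kernel (init_mm) page tables */
	static inline pmd_t *pmd_off_k(unsigned long va)
	{
		return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
	}

In the file below, tx39_flush_cache_page() uses pmd_off(mm, page) together with pte_offset_kernel() to reach the PTE of the page being flushed.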
// SPDX-License-Identifier: GPL-2.0
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;	/* Size in bytes */

#include <asm/r4kcache.h>

/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set push\n\t" \
	".set noreorder\n\t" \
	"b 1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)

/* TX39H-style cache flush routines. */
static void tx39h_flush_icache_all(void)
{
	unsigned long flags, config;

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();
	blast_inv_dcache_range(addr, addr + size);
}


/* TX39H2,TX39H3 */
static inline void tx39_blast_dcache_page(unsigned long addr)
{
	if (current_cpu_type() != CPU_TX3912)
		blast_dcache16_page(addr);
}

static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}

static inline void tx39_blast_dcache(void)
{
	blast_dcache16();
}

static inline void tx39_blast_icache_page(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page_indexed(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache(void)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

static void tx39__flush_cache_vmap(void)
{
	tx39_blast_dcache();
}

static void tx39__flush_cache_vunmap(void)
{
	tx39_blast_dcache();
}

static inline void tx39_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	tx39_blast_dcache();
}

static inline void tx39___flush_cache_all(void)
{
	tx39_blast_dcache();
	tx39_blast_icache();
}

static void tx39_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	if (cpu_context(smp_processor_id(), mm) != 0)
		tx39_blast_dcache();
}

static void tx39_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;
	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	tx39_blast_dcache();
}

static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pmdp = pmd_off(mm, page);
	ptep = pte_offset_kernel(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation. So we do indexed flushes
	 * in that case, which doesn't flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || exec)
			tx39_blast_dcache_page(page);
		if (exec)
			tx39_blast_icache_page(page);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || exec)
		tx39_blast_dcache_page_indexed(page);
	if (exec)
		tx39_blast_icache_page_indexed(page);
}

static void local_tx39_flush_data_cache_page(void * addr)
{
	tx39_blast_dcache_page((unsigned long)addr);
}

static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}

static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
	if (end - start > dcache_size)
		tx39_blast_dcache();
	else
		protected_blast_dcache_range(start, end);

	if (end - start > icache_size)
		tx39_blast_icache();
	else {
		unsigned long flags, config;
		/* disable icache (set ICE#) */
		local_irq_save(flags);
		config = read_c0_conf();
		write_c0_conf(config & ~TX39_CONF_ICE);
		TX39_STOP_STREAMING();
		protected_blast_icache_range(start, end);
		write_c0_conf(config);
		local_irq_restore(flags);
	}
}

static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while(addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_dcache_range(addr, addr + size);
	}
}

static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while(addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_inv_dcache_range(addr, addr + size);
	}
}

static __init void tx39_probe_cache(void)
{
	unsigned long config;

	config = read_c0_conf();

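	/*
	 * The ICS/DCS fields of the config register encode the cache
	 * sizes as a power of two: size in bytes = 1 KiB << field.
	 */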
	icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
				  TX39_CONF_ICS_SHIFT));
	dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
				  TX39_CONF_DCS_SHIFT));

	current_cpu_data.icache.linesz = 16;
	switch (current_cpu_type()) {
	case CPU_TX3912:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 4;
		break;

	case CPU_TX3927:
		current_cpu_data.icache.ways = 2;
		current_cpu_data.dcache.ways = 2;
		current_cpu_data.dcache.linesz = 16;
		break;

	case CPU_TX3922:
	default:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 16;
		break;
	}
}

void tx39_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	unsigned long config;

	config = read_c0_conf();
	config &= ~TX39_CONF_WBON;
	write_c0_conf(config);

	tx39_probe_cache();

	switch (current_cpu_type()) {
	case CPU_TX3912:
		/* TX39/H core (writethru direct-map cache) */
		__flush_cache_vmap = tx39__flush_cache_vmap;
		__flush_cache_vunmap = tx39__flush_cache_vunmap;
		flush_cache_all = tx39h_flush_icache_all;
		__flush_cache_all = tx39h_flush_icache_all;
		flush_cache_mm = (void *) tx39h_flush_icache_all;
		flush_cache_range = (void *) tx39h_flush_icache_all;
		flush_cache_page = (void *) tx39h_flush_icache_all;
		flush_icache_range = (void *) tx39h_flush_icache_all;
		local_flush_icache_range = (void *) tx39h_flush_icache_all;

		local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
		flush_data_cache_page = (void *) tx39h_flush_icache_all;

		_dma_cache_wback_inv = tx39h_dma_cache_wback_inv;

		shm_align_mask = PAGE_SIZE - 1;

		break;

	case CPU_TX3922:
	case CPU_TX3927:
	default:
		/* TX39/H2,H3 core (writeback 2way-set-associative cache) */
		/* board-dependent init code may set WBON */

		__flush_cache_vmap = tx39__flush_cache_vmap;
		__flush_cache_vunmap = tx39__flush_cache_vunmap;

		flush_cache_all = tx39_flush_cache_all;
		__flush_cache_all = tx39___flush_cache_all;
		flush_cache_mm = tx39_flush_cache_mm;
		flush_cache_range = tx39_flush_cache_range;
		flush_cache_page = tx39_flush_cache_page;
		flush_icache_range = tx39_flush_icache_range;
		local_flush_icache_range = tx39_flush_icache_range;

		__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;

		local_flush_data_cache_page = local_tx39_flush_data_cache_page;
		flush_data_cache_page = tx39_flush_data_cache_page;

		_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
		_dma_cache_wback = tx39_dma_cache_wback_inv;
		_dma_cache_inv = tx39_dma_cache_inv;

		shm_align_mask = max_t(unsigned long,
				       (dcache_size / current_cpu_data.dcache.ways) - 1,
				       PAGE_SIZE - 1);

		break;
	}

	__flush_icache_user_range = flush_icache_range;
	__local_flush_icache_user_range = local_flush_icache_range;

	current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
	current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

	current_cpu_data.icache.sets =
		current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
	current_cpu_data.dcache.sets =
		current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;

	if (current_cpu_data.dcache.waysize > PAGE_SIZE)
		current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

	current_cpu_data.icache.waybit = 0;
	current_cpu_data.dcache.waybit = 0;

	pr_info("Primary instruction cache %ldkB, linesize %d bytes\n",
		icache_size >> 10, current_cpu_data.icache.linesz);
	pr_info("Primary data cache %ldkB, linesize %d bytes\n",
		dcache_size >> 10, current_cpu_data.dcache.linesz);

	build_clear_page();
	build_copy_page();
	tx39h_flush_icache_all();
}
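For context on how this code is reached: the generic MIPS cache setup selects the TX39 routines at boot when the CPU reports a TX39-style cache. A rough sketch of the call site in arch/mips/mm/cache.c, assuming the cpu_has_tx39_cache feature test (the exact upstream code may differ slightly):

	void cpu_cache_init(void)
	{
		/* ... other cache families ... */
		if (cpu_has_tx39_cache) {
			/* weak extern so kernels built without TX39 support still link */
			extern void __weak tx39_cache_init(void);

			tx39_cache_init();
		}
		/* ... */
	}

tx39_cache_init() then probes the cache geometry and installs the tx39_* handlers into the generic function pointers (flush_cache_all, flush_icache_range, _dma_cache_wback_inv, and so on) used by the rest of the kernel.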