/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H

/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left. That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software. We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page. By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this keeps a mod bit via the hardware
 * dirty bit. (An illustrative sketch of this referenced/modified
 * handling follows the bit definitions below.)
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things. The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * The following bits are implemented by the TLB hardware
 */
#define _PAGE_GLOBAL_SHIFT 0
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK (7 << _CACHE_SHIFT)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)

#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT (0)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)

/*
 * The following bits are implemented by the TLB hardware
 */
#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
#define _CACHE_MASK _CACHE_UNCACHED

#define _PFN_SHIFT PAGE_SHIFT

#else
/*
 * When using the RI/XI bit support, we have 13 bits of flags below
 * the physical address. The RI/XI bits are placed such that an SRL 5
 * can strip off the software bits, then a ROTR 2 can move the RI/XI
 * into bits [63:62]. This also limits the physical address to 56 bits,
 * which is more than we need right now.
 */

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT 0
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
#else
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
#endif

/* Page cannot be executed */
#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
#define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })

/* Page cannot be read */
#define _PAGE_NO_READ_SHIFT (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })

#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK (7 << _CACHE_SHIFT)

#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#define _PAGE_SILENT_READ _PAGE_VALID
#define _PAGE_SILENT_WRITE _PAGE_DIRTY

#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))

#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ ({BUG(); 0; })
#define _PAGE_NO_READ_SHIFT ({BUG(); 0; })
#endif
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC ({BUG(); 0; })
#endif
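
/*
 * Illustrative sketch, not part of the original header: the comment at
 * the top of this file describes how the referenced/modified state is
 * tracked in software. Roughly, the fault paths record _PAGE_ACCESSED /
 * _PAGE_MODIFIED and only set the hardware-visible "silent" bits
 * (_PAGE_VALID / _PAGE_DIRTY) once the software permission bits allow
 * the access. The helper names below are hypothetical; the real helpers
 * of this kind live in asm/pgtable.h.
 */
#ifndef __ASSEMBLY__
static inline unsigned long __example_pte_mkyoung(unsigned long pte)
{
	pte |= _PAGE_ACCESSED;			/* software "referenced" bit */
	if (cpu_has_rixi) {
		if (!(pte & _PAGE_NO_READ))
			pte |= _PAGE_SILENT_READ;	/* TLB may now satisfy reads */
	} else if (pte & _PAGE_READ)
		pte |= _PAGE_SILENT_READ;
	return pte;
}

static inline unsigned long __example_pte_mkdirty(unsigned long pte)
{
	pte |= _PAGE_MODIFIED;			/* software "dirty" bit */
	if (pte & _PAGE_WRITE)
		pte |= _PAGE_SILENT_WRITE;	/* TLB may now satisfy writes */
	return pte;
}
#endif /* !__ASSEMBLY__ */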

#ifndef __ASSEMBLY__
/*
 * pte_to_entrylo converts a page table entry (PTE) into a MIPS
 * EntryLo0/1 register value.
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
	if (cpu_has_rixi) {
		int sa;
#ifdef CONFIG_32BIT
		sa = 31 - _PAGE_NO_READ_SHIFT;
#else
		sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
		/*
		 * C has no way to express that this is a DSRL
		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2. Luckily
		 * in the fast path this is done in assembly.
		 */
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}

	return pte_val >> _PAGE_GLOBAL_SHIFT;
}
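
/*
 * Illustrative sketch, not part of the original header: a PTE pair for
 * two adjacent virtual pages maps to the EntryLo0/EntryLo1 pair of a
 * single TLB entry. The TLB refill and update paths perform the
 * equivalent of the conversion below before the values are written to
 * the CP0 EntryLo registers. The helper name is hypothetical.
 */
static inline void __example_ptes_to_entrylo(unsigned long even_pte,
					     unsigned long odd_pte,
					     uint64_t *entrylo0,
					     uint64_t *entrylo1)
{
	*entrylo0 = pte_to_entrylo(even_pte);	/* even page of the pair */
	*entrylo1 = pte_to_entrylo(odd_pte);	/* odd page of the pair */
}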
#endif /* !__ASSEMBLY__ */

/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too. Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_CPU_LOONGSON3)

/* Using COHERENT flag for NONCOHERENT doesn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */
#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */

#elif defined(CONFIG_MACH_JZ4740)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#endif
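
/*
 * Illustrative sketch, not part of the original header: the cache
 * coherency attribute occupies the _CACHE_MASK field of a pte/pgprot
 * value, so switching a mapping to a different attribute is a
 * mask-and-or on that field (roughly what pgprot_noncached() does in
 * asm/pgtable.h). The helper name is hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long __example_set_uncached(unsigned long prot)
{
	/* clear the cache attribute field, then select the uncached one */
	return (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
}
#endif /* !__ASSEMBLY__ */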

#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
			_PFN_MASK | _CACHE_MASK)
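
/*
 * Illustrative sketch, not part of the original header: _PAGE_CHG_MASK
 * names the bits that survive a protection change - the PFN, the cache
 * attribute and the software referenced/modified state - while the
 * remaining bits come from the new protection value (this mirrors how a
 * pte_modify()-style helper behaves). The helper name is hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long __example_pte_modify(unsigned long pte,
						 unsigned long newprot)
{
	return (pte & _PAGE_CHG_MASK) | newprot;
}
#endif /* !__ASSEMBLY__ */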

#endif /* _ASM_PGTABLE_BITS_H */