#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x8
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
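
/*
 * Illustrative sketch (not part of the original header): how the SLB
 * ESID and VSID dwords above combine into the operand pair for slbmte,
 * mirroring what the SLB miss/bolting code in arch/powerpc/mm/slb.c
 * does.  The helper below is hypothetical and assumes a 256M kernel
 * segment.
 */
#if 0
static inline void slb_sketch_make_entry(unsigned long ea, int index,
					 unsigned long *esid_data,
					 unsigned long *vsid_data)
{
	/* ESID dword: effective segment base | valid bit | SLB index */
	*esid_data = (ea & ~((1UL << SID_SHIFT) - 1)) | SLB_ESID_V | index;
	/* VSID dword: scrambled VSID | segment size | protection flags */
	*vsid_data = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		     ((unsigned long)MMU_SEGSIZE_256M << SLB_VSID_SSIZE_SHIFT) |
		     SLB_VSID_KERNEL;
}
#endif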

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read, User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

#ifndef __ASSEMBLY__

struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;
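
/*
 * Illustrative note (not part of the original header): each HPTE is two
 * dwords (16 bytes), so a PTE group (PTEG) of HPTES_PER_GROUP entries is
 * 128 bytes.  The hash setup code derives htab_hash_mask from the table
 * size accordingly; a hypothetical sketch of that relationship:
 */
#if 0
static inline unsigned long htab_sketch_hash_mask(unsigned long size_bytes)
{
	/* number of PTE groups = table size / (8 HPTEs * 16 bytes each) */
	unsigned long pteg_count =
		size_bytes / (HPTES_PER_GROUP * sizeof(struct hash_pte));

	return pteg_count - 1;	/* table size is a power of two */
}
#endif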

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be OR'ed
 *            directly into a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};

#endif /* __ASSEMBLY__ */
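
/*
 * Illustrative sketch (not part of the original header): the sllp field
 * above is OR'ed straight into the slbmte VSID dword to select the base
 * page size of a segment.  A hypothetical helper showing that use:
 */
#if 0
static inline unsigned long slb_sketch_vsid_flags(int psize)
{
	/* kernel protection bits plus the L||LP page-size encoding */
	return SLB_VSID_KERNEL | mmu_psize_defs[psize].sllp;
}
#endif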

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Shift applied when encoding the virtual page number (VPN).
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits.  This enables us to address up to a 76-bit VA.
 * For the HPT hash we can ignore the page size bits of the VA, and for
 * the HPTE encoding we ignore up to 23 bits of the VA.  Ignoring the
 * lower 12 bits therefore works in all cases, including 4k pages.
 */
#define VPN_SHIFT	12

#ifndef __ASSEMBLY__

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long vpn,
					  int psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, psize, ssize);
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN, and LP fields of the HPTE appropriately
 * for the page size.  We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
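
/*
 * Illustrative sketch (not part of the original header): building both
 * dwords of a valid HPTE from a VPN and a real address, combining the
 * encode helpers above the way an hpte_insert path does.  The helper
 * name and the flag choices here are hypothetical.
 */
#if 0
static inline void hpte_sketch_build(unsigned long vpn, unsigned long pa,
				     int psize, int ssize,
				     unsigned long *vp, unsigned long *rp)
{
	/* first dword: AVPN, B and L fields, plus the valid bit */
	*vp = hpte_encode_v(vpn, psize, ssize) | HPTE_V_VALID;
	/* second dword: real page number, coherence bit, PP permissions
	 * (PP_RWXX: supervisor read/write, user none) */
	*rp = hpte_encode_r(pa, psize) | HPTE_R_M | PP_RWXX;
}
#endif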

/*
 * Build the VPN (the VA shifted right by VPN_SHIFT) from the VSID,
 * EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
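
/*
 * Illustrative sketch (not part of the original header): turning the
 * value returned by hpt_hash() into the slot of the first HPTE in the
 * primary PTE group, as the hash fault paths do.  The helper name is
 * hypothetical.
 */
#if 0
static inline unsigned long hpt_sketch_primary_slot(unsigned long vpn,
						    unsigned int shift,
						    int ssize)
{
	unsigned long hash = hpt_hash(vpn, shift, ssize);

	/* mask to the table size, then scale by 8 HPTEs per group */
	return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}
#endif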

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 38-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID | 1 << 37, for user addresses it is:
 *	(context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1))
 *
 * This splits the proto-VSID into the below ranges:
 *	0 to (2^(CONTEXT_BITS + USER_ESID_BITS) - 1)	  : User proto-VSIDs
 *	2^(CONTEXT_BITS + USER_ESID_BITS) to (2^VSID_BITS - 1) : Kernel proto-VSIDs
 *
 * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
 * That is, we assign half of the space to user processes and half
 * to the kernel.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for USER_ESID_BITS significant bits of ESID and
 * CONTEXT_BITS bits of context for user addresses.
 * i.e. 64T (46 bits) of address space for up to half a million contexts.
 *
 * - The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits.  It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		38
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		26
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define CONTEXT_BITS		19
#define USER_ESID_BITS		18
#define USER_ESID_BITS_1T	6

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
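
/*
 * Illustrative sketch (not part of the original header): the fast
 * "mod 2^n - 1" used by the scramble.  Because 2^n == 1 (mod 2^n - 1),
 * the bits of the product above bit n can simply be folded down and
 * added to the low bits; one extra fold handles a sum that reaches the
 * modulus.  The hypothetical helper below restates the C vsid_scramble()
 * macro further down for the 256M case.
 */
#if 0
static inline unsigned long vsid_sketch_scramble_256M(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER_256M;

	/* fold bits above VSID_BITS_256M back into the low bits */
	x = (x >> VSID_BITS_256M) + (x & VSID_MODULUS_256M);
	/* if the sum reached the modulus, fold the carry once more */
	return (x + ((x + 1) >> VSID_BITS_256M)) & VSID_MODULUS_256M;
}
#endif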

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx


#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS	(PAGE_SHIFT - 2)
#define SBP_L2_BITS	(PAGE_SHIFT - 3)
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
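
/*
 * Illustrative sketch (not part of the original header): walking the
 * 3-level sub-page protection tree with the SBP_* shifts above, in the
 * way the hash fault path indexes it.  The helper name is hypothetical;
 * it assumes the 64k PAGE_SHIFT that the sub-page protection option
 * requires.
 */
#if 0
static inline unsigned int spt_sketch_lookup(struct subpage_prot_table *spt,
					     unsigned long ea)
{
	unsigned int **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	/* addresses below 4GB use the cached low_prot pointers */
	sbpm = (ea < 0x100000000UL) ? spt->low_prot :
		spt->protptrs[ea >> SBP_L3_SHIFT];
	if (!sbpm)
		return 0;
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	/* one protection word per 64k page (4 bytes protects 64k) */
	return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}
#endif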

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	/*
	 * Right now we support 64TB, with 4 bits for each
	 * 1TB slice; we need 32 bytes for 64TB.
	 */
	unsigned char high_slices_psize[32];  /* 4 bits per slice for now */
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
	unsigned long acop;		/* mask of enabled coprocessor types */
	unsigned int cop_pid;		/* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
} mm_context_t;


#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/*
 * This is only valid for addresses >= PAGE_OFFSET
 * The proto-VSID space is divided into two classes:
 *	User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1
 *	Kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^VSID_BITS - 1
 *
 * With KERNEL_START at 0xc000000000000000, the proto-VSID for
 * the kernel ends up as 0xc00000000 (36 bits).  With 64TB
 * support we need to have the kernel proto-VSID in the
 * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long proto_vsid;
	/*
	 * We need to make sure proto_vsid for the kernel is
	 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
	 */
	if (ssize == MMU_SEGSIZE_256M) {
		proto_vsid = ea >> SID_SHIFT;
		proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
		return vsid_scramble(proto_vsid, 256M);
	}
	proto_vsid = ea >> SID_SHIFT_1T;
	proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
	return vsid_scramble(proto_vsid, 1T);
}

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << USER_ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << USER_ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}
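
/*
 * Illustrative sketch (not part of the original header): chaining the
 * helpers above to go from a user effective address and context number
 * to the VPN consumed by hpt_hash() and hpte_encode_v().  The helper
 * name is hypothetical.
 */
#if 0
static inline unsigned long user_sketch_ea_to_vpn(unsigned long context,
						  unsigned long ea)
{
	int ssize = user_segment_size(ea);
	unsigned long vsid = get_vsid(context, ea, ssize);

	/* the VPN feeds the hash and HPTE encoding functions */
	return hpt_vpn(ea, vsid, ssize);
}
#endif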

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */