2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* S390 version
|
2012-07-20 16:15:04 +07:00
|
|
|
* Copyright IBM Corp. 1999, 2000
|
2005-04-17 05:20:36 +07:00
|
|
|
* Author(s): Hartmut Penner (hp@de.ibm.com)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _S390_PAGE_H
|
|
|
|
#define _S390_PAGE_H
|
|
|
|
|
2007-10-12 21:11:50 +07:00
|
|
|
#include <linux/const.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/types.h>
|
|
|
|
|
2017-06-16 22:24:39 +07:00
|
|
|
#define _PAGE_SHIFT 12
|
|
|
|
#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT)
|
|
|
|
#define _PAGE_MASK (~(_PAGE_SIZE - 1))
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* PAGE_SHIFT determines the page size */
|
2017-06-16 22:24:39 +07:00
|
|
|
#define PAGE_SHIFT _PAGE_SHIFT
|
|
|
|
#define PAGE_SIZE _PAGE_SIZE
|
|
|
|
#define PAGE_MASK _PAGE_MASK
|
2005-05-01 22:58:58 +07:00
|
|
|
#define PAGE_DEFAULT_ACC 0
|
|
|
|
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2015-07-18 06:23:34 +07:00
|
|
|
#define HPAGE_SHIFT 20
|
2008-04-30 18:38:46 +07:00
|
|
|
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
|
|
|
|
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
|
|
|
|
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
|
2016-07-04 19:47:01 +07:00
|
|
|
#define HUGE_MAX_HSTATE 2
|
2008-04-30 18:38:46 +07:00
|
|
|
|
|
|
|
#define ARCH_HAS_SETCLEAR_HUGE_PTE
|
|
|
|
#define ARCH_HAS_HUGE_PTE_TYPE
|
|
|
|
#define ARCH_HAS_PREPARE_HUGEPAGE
|
|
|
|
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
|
|
|
|
|
2015-07-18 06:23:34 +07:00
|
|
|
#include <asm/setup.h>
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
2016-06-14 11:55:43 +07:00
|
|
|
void __storage_key_init_range(unsigned long start, unsigned long end);
|
|
|
|
|
2013-10-07 17:12:32 +07:00
|
|
|
static inline void storage_key_init_range(unsigned long start, unsigned long end)
|
|
|
|
{
|
2016-06-14 11:55:43 +07:00
|
|
|
if (PAGE_DEFAULT_KEY)
|
|
|
|
__storage_key_init_range(start, end);
|
2013-10-07 17:12:32 +07:00
|
|
|
}
|
2012-11-02 18:56:43 +07:00
|
|
|
|
/*
 * Let the compiler generate the page-clearing code: gcc already emits a
 * proper xc+pfd loop, which is faster than mvcl for page clearing (it
 * avoids millicode overhead and parameter checking) as long as cache
 * bypassing is not required.  Hence clear_page() is plain memset(),
 * matching the asm-generic variant.
 */
|
|
|
#define clear_page(page) memset((page), 0, PAGE_SIZE)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-11-19 20:25:17 +07:00
|
|
|
/*
|
|
|
|
* copy_page uses the mvcl instruction with 0xb0 padding byte in order to
|
|
|
|
* bypass caches when copying a page. Especially when copying huge pages
|
|
|
|
* this keeps L1 and L2 data caches alive.
|
|
|
|
*/
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
 * Copy one 4K page with a single mvcl instruction.
 *
 * mvcl operates on even/odd register pairs, hence the explicitly pinned
 * register variables: r2/r3 = destination address/length, r4/r5 =
 * source address/length.  Both lengths are 0x1000 (one page); bits 0-7
 * of r5 carry the 0xb0 padding byte which, per the comment preceding
 * this function, makes mvcl bypass the caches so L1/L2 data caches
 * stay alive when copying (huge) pages.
 */
static inline void copy_page(void *to, void *from)
{
	register void *reg2 asm ("2") = to;		/* destination address */
	register unsigned long reg3 asm ("3") = 0x1000;	/* destination length: 4K */
	register void *reg4 asm ("4") = from;		/* source address */
	register unsigned long reg5 asm ("5") = 0xb0001000;	/* pad byte 0xb0 | source length 4K */
	asm volatile(
		"	mvcl	2,4"
		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
		: : "memory", "cc");
}
|
|
|
|
|
|
|
|
#define clear_user_page(page, vaddr, pg) clear_page(page)
|
|
|
|
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
|
|
|
|
|
2007-07-17 18:03:05 +07:00
|
|
|
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
|
|
|
|
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
|
2005-04-17 05:20:36 +07:00
|
|
|
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
|
|
|
|
|
|
|
|
/*
|
|
|
|
* These are used to make use of C type-checking..
|
|
|
|
*/
|
|
|
|
|
|
|
|
typedef struct { unsigned long pgprot; } pgprot_t;
|
2011-05-23 15:24:40 +07:00
|
|
|
typedef struct { unsigned long pgste; } pgste_t;
|
2005-04-17 05:20:36 +07:00
|
|
|
typedef struct { unsigned long pte; } pte_t;
|
|
|
|
typedef struct { unsigned long pmd; } pmd_t;
|
2007-10-22 17:52:48 +07:00
|
|
|
typedef struct { unsigned long pud; } pud_t;
|
2017-04-24 23:19:10 +07:00
|
|
|
typedef struct { unsigned long p4d; } p4d_t;
|
2005-04-17 05:20:36 +07:00
|
|
|
typedef struct { unsigned long pgd; } pgd_t;
|
2008-02-10 00:24:35 +07:00
|
|
|
typedef pte_t *pgtable_t;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-02-10 00:24:35 +07:00
|
|
|
#define pgprot_val(x) ((x).pgprot)
|
2011-05-23 15:24:40 +07:00
|
|
|
#define pgste_val(x) ((x).pgste)
|
2008-02-10 00:24:35 +07:00
|
|
|
#define pte_val(x) ((x).pte)
|
|
|
|
#define pmd_val(x) ((x).pmd)
|
2007-10-22 17:52:48 +07:00
|
|
|
#define pud_val(x) ((x).pud)
|
2017-04-24 23:19:10 +07:00
|
|
|
#define p4d_val(x) ((x).p4d)
|
2005-04-17 05:20:36 +07:00
|
|
|
#define pgd_val(x) ((x).pgd)
|
|
|
|
|
2011-05-23 15:24:40 +07:00
|
|
|
#define __pgste(x) ((pgste_t) { (x) } )
|
2005-04-17 05:20:36 +07:00
|
|
|
#define __pte(x) ((pte_t) { (x) } )
|
|
|
|
#define __pmd(x) ((pmd_t) { (x) } )
|
2011-05-23 15:24:40 +07:00
|
|
|
#define __pud(x) ((pud_t) { (x) } )
|
2017-04-24 23:19:10 +07:00
|
|
|
#define __p4d(x) ((p4d_t) { (x) } )
|
2005-04-17 05:20:36 +07:00
|
|
|
#define __pgd(x) ((pgd_t) { (x) } )
|
|
|
|
#define __pgprot(x) ((pgprot_t) { (x) } )
|
|
|
|
|
2011-05-23 15:24:39 +07:00
|
|
|
/*
 * Set the storage key of the 4K page at @addr.
 *
 * @addr:   address within the page
 * @skey:   new storage key value (access-control, fetch-protection,
 *          referenced and changed bits; see the _PAGE_* key bits below)
 * @mapped: non-zero if the page may currently be mapped somewhere
 */
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		/*
		 * SSKE encoded via .insn (opcode 0xb22b) with m3 = 8 --
		 * presumably the nonquiescing (NQ) control, which skips
		 * the system-wide quiesce when the page is not mapped;
		 * confirm against the z/Architecture Principles of
		 * Operation.
		 */
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
|
|
|
|
|
2011-05-23 15:24:39 +07:00
|
|
|
/*
 * Read the storage key of the 4K page at @addr via the ISKE
 * (insert storage key extended) instruction.
 */
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}
|
|
|
|
|
2011-05-23 15:24:39 +07:00
|
|
|
/*
 * Reset the referenced bit in the storage key of the page at @addr
 * (RRBE) and return the resulting condition code (0-3), which encodes
 * the previous referenced/changed state of the page.
 */
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"		/* condition code -> bits 28-31 of cc */
		"	srl	%0,28\n"	/* shift it down to bits 0-1 */
		: "=d" (cc) : "a" (addr) : "cc");
	return cc;
}
|
|
|
|
|
|
|
|
/* Bits in the storage key */
|
|
|
|
#define _PAGE_CHANGED 0x02 /* HW changed bit */
|
|
|
|
#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
|
|
|
|
#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
|
|
|
|
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
|
|
|
|
|
2008-05-07 14:22:59 +07:00
|
|
|
struct page;
|
|
|
|
void arch_free_page(struct page *page, int order);
|
|
|
|
void arch_alloc_page(struct page *page, int order);
|
2016-06-14 17:56:01 +07:00
|
|
|
void arch_set_page_dat(struct page *page, int order);
|
|
|
|
void arch_set_page_nodat(struct page *page, int order);
|
|
|
|
int arch_test_page_nodat(struct page *page);
|
2011-10-30 21:17:13 +07:00
|
|
|
void arch_set_page_states(int make_stable);
|
2008-05-07 14:22:59 +07:00
|
|
|
|
2010-11-10 16:05:55 +07:00
|
|
|
/*
 * /dev/mem access is unconditionally denied on s390: every pfn is
 * rejected, so this always returns 0.
 */
static inline int devmem_is_allowed(unsigned long pfn)
{
	(void)pfn;	/* unused: access is denied regardless of the pfn */
	return 0;
}
|
|
|
|
|
2008-05-07 14:22:59 +07:00
|
|
|
#define HAVE_ARCH_FREE_PAGE
|
|
|
|
#define HAVE_ARCH_ALLOC_PAGE
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
|
2017-08-07 20:16:15 +07:00
|
|
|
#define __PAGE_OFFSET 0x0UL
|
|
|
|
#define PAGE_OFFSET 0x0UL
|
|
|
|
|
|
|
|
#define __pa(x) ((unsigned long)(x))
|
|
|
|
#define __va(x) ((void *)(unsigned long)(x))
|
|
|
|
|
|
|
|
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
|
2015-11-16 16:45:03 +07:00
|
|
|
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
|
2017-08-07 20:16:15 +07:00
|
|
|
|
|
|
|
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
|
2015-11-16 16:45:03 +07:00
|
|
|
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2017-08-07 20:16:15 +07:00
|
|
|
#define phys_to_pfn(kaddr) ((kaddr) >> PAGE_SHIFT)
|
|
|
|
#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
|
|
|
|
|
|
|
|
#define phys_to_page(kaddr) pfn_to_page(phys_to_pfn(kaddr))
|
|
|
|
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
|
|
|
|
|
|
|
|
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
|
|
|
|
|
2008-02-10 00:24:35 +07:00
|
|
|
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
|
2005-04-17 05:20:36 +07:00
|
|
|
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
|
|
|
2006-03-27 16:15:45 +07:00
|
|
|
#include <asm-generic/memory_model.h>
|
2009-05-14 05:56:30 +07:00
|
|
|
#include <asm-generic/getorder.h>
|
2005-09-04 05:54:30 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#endif /* _S390_PAGE_H */
|