#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	/* atomic fetch+clear, so a racing hardware A/D-bit update on
	   another CPU cannot be lost between the read and the write */
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}
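
/*
 * Note (a sketch of the rationale, not from this file): on SMP the xchg
 * above is what keeps the fetch and the clear indivisible; the UP fallback
 * can safely read and clear in two steps because no other CPU can update
 * the entry in between. The same pattern repeats for pmd and pud below.
 */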

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/* native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	*p4dp = p4d;
}

static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	native_set_p4d(p4d, native_make_p4d(0));
#else
	/* with 4-level paging the p4d level is folded into the pgd */
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
#endif
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13)  |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })
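
/*
 * Worked example (illustrative only; assumes _PAGE_BIT_PROTNONE == 8, so
 * SWP_TYPE_FIRST_BIT == 9 and SWP_OFFSET_FIRST_BIT == 14, matching the
 * layout sketched above):
 *
 *   __swp_entry(2, 0x30).val == (2 << 9) | (0x30 << 14)
 *                            == 0x400 | 0xc0000
 *                            == 0xc0400
 *
 * __swp_type() and __swp_offset() shift and mask the two fields back out;
 * bit 0 (P) stays clear, so the entry is never treated as present.
 */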

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#define __HAVE_ARCH_PTE_SAME
|
2011-01-14 06:46:40 +07:00
|
|
|
|
2012-11-17 04:53:09 +07:00
|
|
|
#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
		int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	/* reject ranges that wrap around the top of the address space */
	if (end < start)
		return false;
	/* reject ranges that reach beyond the user virtual address mask */
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}
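
/*
 * Illustration (assuming the 4-level __VIRTUAL_MASK_SHIFT of 47): an end
 * address of 0x00007fffffffffff gives end >> 47 == 0 and passes, while
 * 0x0000800000000000 gives end >> 47 == 1 and is rejected before any
 * page table walk starts.
 */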

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */