mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-25 04:57:24 +07:00
ca5999fde0
The include/linux/pgtable.h is going to be the home of generic page table manipulation functions. Start with moving asm-generic/pgtable.h to include/linux/pgtable.h and make the latter include asm/pgtable.h. Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Cain <bcain@codeaurora.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Zankel <chris@zankel.net> Cc: "David S. Miller" <davem@davemloft.net> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Greg Ungerer <gerg@linux-m68k.org> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Ley Foon Tan <ley.foon.tan@intel.com> Cc: Mark Salter <msalter@redhat.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Simek <monstr@monstr.eu> Cc: Nick Hu <nickhu@andestech.com> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Richard Weinberger <richard@nod.at> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vincent Chen <deanbo422@gmail.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Will Deacon <will@kernel.org> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Link: http://lkml.kernel.org/r/20200514170327.31389-3-rppt@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
102 lines
3.3 KiB
C
102 lines
3.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* Copyright 2013 Red Hat Inc.
|
|
*
|
|
* Authors: Jérôme Glisse <jglisse@redhat.com>
|
|
*
|
|
* See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
|
|
*/
|
|
#ifndef LINUX_HMM_H
|
|
#define LINUX_HMM_H
|
|
|
|
#include <linux/kconfig.h>
|
|
#include <linux/pgtable.h>
|
|
|
|
#include <linux/device.h>
|
|
#include <linux/migrate.h>
|
|
#include <linux/memremap.h>
|
|
#include <linux/completion.h>
|
|
#include <linux/mmu_notifier.h>
|
|
|
|
/*
 * On output:
 * 0             - The page is faultable and a future call with
 *                 HMM_PFN_REQ_FAULT could succeed.
 * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
 *                 least readable. If dev_private_owner is !NULL then this
 *                 could point at a DEVICE_PRIVATE page.
 * HMM_PFN_WRITE - if the page memory can be written to (requires
 *                 HMM_PFN_VALID)
 * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
 *                 fail. ie poisoned memory, special pages, no vma, etc
 *
 * On input:
 * 0                 - Return the current state of the page, do not fault it.
 * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or
 *                     hmm_range_fault() will fail
 * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or
 *                     hmm_range_fault() will fail. Must be combined with
 *                     HMM_PFN_REQ_FAULT.
 */
enum hmm_pfn_flags {
	/* Output flags, stored in the top bits of the entry */
	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),

	/* Input flags, aliases of the output flag bits */
	HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
	HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,

	/*
	 * Mask of every flag bit; the remaining low bits of an entry hold
	 * the PFN itself (see hmm_pfn_to_page()).
	 */
	HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR,
};
|
|
|
|
/*
|
|
* hmm_pfn_to_page() - return struct page pointed to by a device entry
|
|
*
|
|
* This must be called under the caller 'user_lock' after a successful
|
|
* mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
|
|
* already.
|
|
*/
|
|
static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
|
|
{
|
|
return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
|
|
}
|
|
|
|
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * Filled in by the caller and handed to hmm_range_fault(), which populates
 * @hmm_pfns with one entry per page in [@start, @end).
 *
 * @notifier: a mmu_interval_notifier that includes the start/end
 * @notifier_seq: result of mmu_interval_read_begin()
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @hmm_pfns: array of pfns (big enough for the range)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
 * @dev_private_owner: owner of device private pages
 */
struct hmm_range {
	struct mmu_interval_notifier *notifier;
	unsigned long notifier_seq;
	unsigned long start;
	unsigned long end;
	unsigned long *hmm_pfns;
	unsigned long default_flags;
	unsigned long pfn_flags_mask;
	void *dev_private_owner;
};
|
|
|
|
/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 *
 * NOTE(review): return convention is not visible from this header —
 * presumably 0 on success / negative errno on failure; confirm against
 * the definition in mm/hmm.c.
 */
int hmm_range_fault(struct hmm_range *range);
|
|
|
|
/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time
 * to wait already.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
|
|
|
|
#endif /* LINUX_HMM_H */
|