3b50a6e536
hmm_range_fault() returns an array of page frame numbers and flags for how
the pages are mapped in the requested process' page tables. The PFN can be
used to get the struct page with hmm_pfn_to_page() and the page size order
can be determined with compound_order(page).

However, if the page is larger than order 0 (PAGE_SIZE), there is no
indication that a compound page is mapped by the CPU using a larger page
size. Without this information, the caller can't safely use a large device
PTE to map the compound page because the CPU might be using smaller PTEs
with different read/write permissions.

Add a new function hmm_pfn_to_map_order() to return the mapping size order
so that callers know the pages are being mapped with consistent permissions
and a large device page table mapping can be used if one is available.

This will allow devices to optimize mapping the page into HW by avoiding or
batching work for huge pages. For instance the dma_map can be done with a
high order directly.

Link: https://lore.kernel.org/r/20200701225352.9649-3-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
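As a rough illustration of the idea (a sketch, not code taken from the patch), the snippet below walks the pfns array filled by hmm_range_fault() and uses the new hmm_pfn_to_map_order() to handle one CPU mapping at a time. my_device_map() and my_consume_pfns() are hypothetical driver helpers, and for brevity the sketch assumes range->start is aligned to the largest possible mapping order so a compound page never starts before the range.

/* Hypothetical sketch: consume hmm_range_fault() output, stepping by the
 * CPU mapping order reported for each pfn.  my_device_map() is made up;
 * the hmm_* calls are the real API.
 */
static void my_device_map(struct page *page, unsigned int order, bool writable);

static void my_consume_pfns(struct hmm_range *range)
{
        unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
        unsigned long i = 0;

        while (i < npages) {
                unsigned long hmm_pfn = range->hmm_pfns[i];
                unsigned int order = 0;

                if (hmm_pfn & HMM_PFN_VALID) {
                        struct page *page = hmm_pfn_to_page(hmm_pfn);

                        /* order > 0 means the CPU maps a compound page with
                         * uniform permissions, so a single large device PTE
                         * (or one high order dma_map) can cover it.
                         */
                        order = hmm_pfn_to_map_order(hmm_pfn);
                        my_device_map(page, order, hmm_pfn & HMM_PFN_WRITE);
                }

                /* The compound page may extend past range->end; never step
                 * beyond the pfn array.
                 */
                i += min(1UL << order, npages - i);
        }
}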
122 lines
4.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <linux/pgtable.h>

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>

/*
 * On output:
 * 0             - The page is faultable and a future call with
 *                 HMM_PFN_REQ_FAULT could succeed.
 * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
 *                 least readable. If dev_private_owner is !NULL then this could
 *                 point at a DEVICE_PRIVATE page.
 * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
 * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
 *                 fail. ie poisoned memory, special pages, no vma, etc
 *
 * On input:
 * 0                 - Return the current state of the page, do not fault it.
 * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
 *                     will fail
 * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
 *                     will fail. Must be combined with HMM_PFN_REQ_FAULT.
 */
enum hmm_pfn_flags {
        /* Output fields and flags */
        HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
        HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
        HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
        HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),

        /* Input flags */
        HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
        HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,

        HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
};

/*
 * hmm_pfn_to_page() - return struct page pointed to by a device entry
 *
 * This must be called under the caller 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
{
        return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
}

/*
 * hmm_pfn_to_map_order() - return the CPU mapping size order
 *
 * This is optionally useful to optimize processing of the pfn result
 * array. It indicates that the page starts at the order aligned VA and is
 * 1<<order bytes long. Every pfn within a high order page will have the
 * same pfn flags, both access protections and the map_order. The caller must
 * be careful with edge cases as the start and end VA of the given page may
 * extend past the range used with hmm_range_fault().
 *
 * This must be called under the caller 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
{
        return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}
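
/*
 * Example (illustration only, not part of this header): a caller walking
 * range->hmm_pfns[] can use the map order to advance over all entries that
 * share one CPU mapping, but must clamp the step because the compound page
 * may begin before range->start and end after range->end.  Here "range"
 * and "i" stand for hypothetical caller variables:
 *
 *	unsigned long va = range->start + (i << PAGE_SHIFT);
 *	unsigned int order = hmm_pfn_to_map_order(range->hmm_pfns[i]);
 *	unsigned long in_page = (1UL << order) -
 *				((va >> PAGE_SHIFT) & ((1UL << order) - 1));
 *	unsigned long left = ((range->end - range->start) >> PAGE_SHIFT) - i;
 *
 *	i += min(in_page, left);
 */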

/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @notifier: a mmu_interval_notifier that includes the start/end
 * @notifier_seq: result of mmu_interval_read_begin()
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @hmm_pfns: array of pfns (big enough for the range)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @dev_private_owner: owner of device private pages
 */
struct hmm_range {
        struct mmu_interval_notifier *notifier;
        unsigned long notifier_seq;
        unsigned long start;
        unsigned long end;
        unsigned long *hmm_pfns;
        unsigned long default_flags;
        unsigned long pfn_flags_mask;
        void *dev_private_owner;
};

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_fault(struct hmm_range *range);

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e. 1s, already sounds like a long
 * time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

#endif /* LINUX_HMM_H */
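
For context on how struct hmm_range, hmm_range_fault() and HMM_RANGE_DEFAULT_TIMEOUT fit together, here is a hedged sketch of the collect-and-retry pattern described in Documentation/vm/hmm.rst; my_device_lock, my_program_device() and my_fault_and_map() are hypothetical driver names, not kernel API.

#include <linux/hmm.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/* Hypothetical lock, also taken by the driver's invalidate() callback. */
static DEFINE_MUTEX(my_device_lock);
static void my_program_device(struct hmm_range *range);	/* hypothetical */

static int my_fault_and_map(struct mm_struct *mm,
                            struct mmu_interval_notifier *notifier,
                            unsigned long start, unsigned long end,
                            unsigned long *pfns)
{
        struct hmm_range range = {
                .notifier = notifier,
                .start = start,
                .end = end,
                .hmm_pfns = pfns,
                /* Fault everything in writable; see the flag comments above */
                .default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
                .pfn_flags_mask = 0,
        };
        unsigned long timeout =
                jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
        int ret;

        do {
                if (time_after(jiffies, timeout))
                        return -EBUSY;

                range.notifier_seq = mmu_interval_read_begin(notifier);
                mmap_read_lock(mm);
                ret = hmm_range_fault(&range);
                mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;	/* raced an invalidation */
                        return ret;
                }

                /* Program the device under the same lock the invalidation
                 * callback takes, after rechecking the notifier sequence.
                 */
                mutex_lock(&my_device_lock);
                if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
                        mutex_unlock(&my_device_lock);
                        continue;
                }
                my_program_device(&range);
                mutex_unlock(&my_device_lock);
                return 0;
        } while (true);
}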