commit db78c22230
The pfn_t type uses an unsigned long to store a pfn + flags value. On a
64-bit platform the upper 12 bits of an unsigned long are never needed to
store the value of a pfn. However, this is not true on 32-bit highmem
platforms, where all 32 bits of a pfn value may be used to address a 44-bit
physical address space, leaving no spare bits for flags. A pfn_t therefore
needs to store a 64-bit value.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=112211
Fixes: 01c8f1c44b ("mm, dax, gpu: convert vm_insert_mixed to pfn_t")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Stuart Foster <smf.linux@ntlworld.com>
Reported-by: Julian Margetson <runaway@candw.ms>
Tested-by: Julian Margetson <runaway@candw.ms>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
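
As context for the header below: the shape of the fix described above is to make the pfn_t value field 64 bits wide and keep the PFN_* flags in its top PAGE_SHIFT bits, so that even a full 32-bit highmem/PAE pfn cannot collide with them. The following is a minimal userspace sketch of that layout, not the literal diff from this commit; the pfn_t typedef itself lives in a separate kernel header, and the PAGE_SHIFT and flag constants here are copied only for illustration.

/*
 * Illustrative sketch only (plain C, compiles in userspace), not the
 * commit's diff.  Shows why the value field must be 64 bits: a 32-bit
 * pfn from a 44-bit PAE physical address space and a PFN_* flag can
 * only coexist in a u64.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define BITS_PER_LONG_LONG      64

/* mirrors the post-fix pfn_t: a single 64-bit value field */
typedef struct {
        uint64_t val;
} pfn_t;

/* flags occupy the top PAGE_SHIFT bits of the 64-bit value */
#define PFN_FLAGS_MASK  (((uint64_t)((1ULL << PAGE_SHIFT) - 1)) << \
                         (BITS_PER_LONG_LONG - PAGE_SHIFT))
#define PFN_DEV         (1ULL << (BITS_PER_LONG_LONG - 3))

int main(void)
{
        uint64_t pfn = 0xffffffffULL;   /* 32-bit pfn -> 44-bit physical address */
        pfn_t p = { .val = pfn | PFN_DEV };

        /* both the full pfn and the flag survive because val is 64 bits wide */
        printf("pfn = %#llx, PFN_DEV set = %d\n",
               (unsigned long long)(p.val & ~PFN_FLAGS_MASK),
               (p.val & PFN_DEV) == PFN_DEV);
        return 0;
}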
102 lines · 2.4 KiB · C
#ifndef _LINUX_PFN_T_H_
#define _LINUX_PFN_T_H_
#include <linux/mm.h>

/*
 * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
 * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry
 * PFN_SG_LAST - pfn references a page and is the last scatterlist entry
 * PFN_DEV - pfn is not covered by system memmap by default
 * PFN_MAP - pfn has a dynamic page mapping established by a device driver
 */
#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))

static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
        pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };

        return pfn_t;
}

/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
{
        return __pfn_to_pfn_t(pfn, 0);
}

extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);

static inline bool pfn_t_has_page(pfn_t pfn)
{
        return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
}

static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
{
        return pfn.val & ~PFN_FLAGS_MASK;
}

static inline struct page *pfn_t_to_page(pfn_t pfn)
{
        if (pfn_t_has_page(pfn))
                return pfn_to_page(pfn_t_to_pfn(pfn));
        return NULL;
}

static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
{
        return PFN_PHYS(pfn_t_to_pfn(pfn));
}

static inline void *pfn_t_to_virt(pfn_t pfn)
{
        if (pfn_t_has_page(pfn))
                return __va(pfn_t_to_phys(pfn));
        return NULL;
}

static inline pfn_t page_to_pfn_t(struct page *page)
{
        return pfn_to_pfn_t(page_to_pfn(page));
}

static inline int pfn_t_valid(pfn_t pfn)
{
        return pfn_valid(pfn_t_to_pfn(pfn));
}

#ifdef CONFIG_MMU
static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
{
        return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
{
        return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
}
#endif

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
        const u64 flags = PFN_DEV|PFN_MAP;

        return (pfn.val & flags) == flags;
}
#else
static inline bool pfn_t_devmap(pfn_t pfn)
{
        return false;
}
pte_t pte_mkdevmap(pte_t pte);
pmd_t pmd_mkdevmap(pmd_t pmd);
#endif
#endif /* _LINUX_PFN_T_H_ */
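
For orientation, a hedged sketch of how a driver of this era (around v4.5, when this commit landed) might consume the helpers above from an mmap fault handler. my_dev_fault() and my_dev_phys() are hypothetical names invented here, and the vm_insert_mixed()/struct vm_fault details reflect the kernel interfaces of that time rather than anything defined in this header; treat it as an illustration, not a reference implementation.

/*
 * Hedged illustration, not from this commit: a driver fault handler
 * (circa v4.5) handing a device pfn to the core VM as a pfn_t.
 * my_dev_phys() is a hypothetical helper that would look up the device
 * physical address backing the faulting page offset.
 */
#include <linux/mm.h>
#include <linux/pfn_t.h>

static phys_addr_t my_dev_phys(struct vm_area_struct *vma, pgoff_t pgoff);

static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        phys_addr_t phys = my_dev_phys(vma, vmf->pgoff);  /* hypothetical */
        /* PFN_DEV: device memory not covered by the system memmap */
        pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV);
        int rc;

        rc = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
        if (rc == -ENOMEM)
                return VM_FAULT_OOM;
        if (rc < 0 && rc != -EBUSY)
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;
}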