mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
device-dax: Enable page_mapping()
In support of enabling memory_failure() handling for device-dax
mappings, set the ->mapping association of pages backing device-dax
mappings. The rmap implementation requires page_mapping() to return the
address_space hosting the vmas that map the page.

The ->mapping pointer is never cleared. There is no possibility for the
page to become associated with another address_space while the device
is enabled. When the device is disabled the 'struct page' array for the
device is destroyed / later reinitialized to zero.

Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
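Why the ->mapping association matters: rmap resolves a page back to the VMAs that map it through the address_space returned by page_mapping(). Below is a minimal consumer-side sketch, loosely modeled on the file-backed page walk in mm/memory-failure.c; the function name for_each_mapping_vma() is illustrative and not part of this commit.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Illustrative sketch (not from this commit): an rmap-style walk that
 * depends on page_mapping(). With ->mapping set by the fault handler
 * in the diff below, a poisoned device-dax page can be traced back to
 * the VMAs that map it.
 */
static void for_each_mapping_vma(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff = page_to_pgoff(page);
	struct vm_area_struct *vma;

	if (!mapping)
		return;	/* no ->mapping, rmap cannot find the mappers */

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* 'vma' maps the page; memory_failure() would unmap/kill here */
	}
	i_mmap_unlock_read(mapping);
}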
commit 2232c6382a
parent 226ab56107
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -245,12 +245,11 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 }
 
 static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
 	phys_addr_t phys;
-	pfn_t pfn;
 	unsigned int fault_size = PAGE_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -272,20 +271,19 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
 }
 
 static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
 	unsigned int fault_size = PMD_SIZE;
 
 	if (check_vma(dev_dax, vmf->vma, __func__))
@@ -321,22 +319,21 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
+	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
 			vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	unsigned long pud_addr = vmf->address & PUD_MASK;
 	struct device *dev = &dev_dax->dev;
 	struct dax_region *dax_region;
 	phys_addr_t phys;
 	pgoff_t pgoff;
-	pfn_t pfn;
 	unsigned int fault_size = PUD_SIZE;
 
 
@@ -373,14 +370,14 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 		return VM_FAULT_SIGBUS;
 	}
 
-	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
+	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
 			vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
-				struct vm_fault *vmf)
+				struct vm_fault *vmf, pfn_t *pfn)
 {
 	return VM_FAULT_FALLBACK;
 }
@@ -389,8 +386,10 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
-	int rc, id;
 	struct file *filp = vmf->vma->vm_file;
+	unsigned long fault_size;
+	int rc, id;
+	pfn_t pfn;
 	struct dev_dax *dev_dax = filp->private_data;
 
 	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
@@ -400,17 +399,39 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 	id = dax_read_lock();
 	switch (pe_size) {
 	case PE_SIZE_PTE:
-		rc = __dev_dax_pte_fault(dev_dax, vmf);
+		fault_size = PAGE_SIZE;
+		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PMD:
-		rc = __dev_dax_pmd_fault(dev_dax, vmf);
+		fault_size = PMD_SIZE;
+		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PUD:
-		rc = __dev_dax_pud_fault(dev_dax, vmf);
+		fault_size = PUD_SIZE;
+		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
 		break;
 	default:
 		rc = VM_FAULT_SIGBUS;
 	}
+
+	if (rc == VM_FAULT_NOPAGE) {
+		unsigned long i;
+
+		/*
+		 * In the device-dax case the only possibility for a
+		 * VM_FAULT_NOPAGE result is when device-dax capacity is
+		 * mapped. No need to consider the zero page, or racing
+		 * conflicting mappings.
+		 */
+		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
+			struct page *page;
+
+			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+			if (page->mapping)
+				continue;
+			page->mapping = filp->f_mapping;
+		}
+	}
 	dax_read_unlock(id);
 
 	return rc;
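For context, a sketch of how the handlers above are reached (this wiring lives elsewhere in drivers/dax/device.c and is unchanged by this commit; the vm_operations_struct member list is abridged here). The PTE-sized .fault entry point simply forwards to the huge-fault handler:

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	/* remaining ops elided in this sketch */
};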