mm: fix memory_failure() handling of dax-namespace metadata

[ Upstream commit 34dc45be4563f344d59ba0428416d0d265aa4f4d ]

Given 'struct dev_pagemap' spans both data pages and metadata pages be
careful to consult the altmap if present to delineate metadata.  In fact
the pfn_first() helper already identifies the first valid data pfn, so
export that helper for other code paths via pgmap_pfn_valid().

Other usage of get_dev_pagemap() are not a concern because those are
operating on known data pfns having been looked up by get_user_pages().
I.e.  metadata pfns are never user mapped.

Link: https://lkml.kernel.org/r/161058501758.1840162.4239831989762604527.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: 6100e34b25 ("mm, memory_failure: Teach memory_failure() about dev_pagemap pages")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: David Hildenbrand <david@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Dan Williams 2021-02-25 17:17:08 -08:00 committed by Greg Kroah-Hartman
parent a7fbcb3b56
commit dc495b59ff
3 changed files with 27 additions and 0 deletions

View File

@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
	return NULL;
}
/*
 * Stub used when the real implementation is compiled out (presumably the
 * !CONFIG_ZONE_DEVICE branch of memremap.h — confirm against the enclosing
 * #ifdef, which is outside this hunk). With no device pagemaps, no pfn can
 * be a valid device-data pfn, so always report false.
 */
static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
return false;
}
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return 0;

View File

@ -1237,6 +1237,12 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
	 */
	put_page(page);
/* device metadata space is not recoverable */
if (!pgmap_pfn_valid(pgmap, pfn)) {
rc = -ENXIO;
goto out;
}
	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by

View File

@ -80,6 +80,21 @@ static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
int i;
for (i = 0; i < pgmap->nr_range; i++) {
struct range *range = &pgmap->ranges[i];
if (pfn >= PHYS_PFN(range->start) &&
pfn <= PHYS_PFN(range->end))
return pfn >= pfn_first(pgmap, i);
}
return false;
}
static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];