mm/thp: fix vma_address() if virtual address below file offset
commit 494334e43c16d63b878536a26505397fce6ff3a2 upstream.

Running certain tests with a DEBUG_VM kernel would crash within hours,
on the total_mapcount BUG() in split_huge_page_to_list(), while trying
to free up some memory by punching a hole in a shmem huge page: split's
try_to_unmap() was unable to find all the mappings of the page (which,
on a !DEBUG_VM kernel, would then keep the huge page pinned in memory).

When that BUG() was changed to a WARN(), it would later crash on the
VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma) in
mm/internal.h:vma_address(), used by rmap_walk_file() for
try_to_unmap().

vma_address() is usually correct, but there's a wraparound case when
the vm_start address is unusually low, but vm_pgoff not so low:
vma_address() chooses max(start, vma->vm_start), but that decides on
the wrong address, because start has become almost ULONG_MAX.

Rewrite vma_address() to be more careful about vm_pgoff; move the
VM_BUG_ON_VMA() out of it, returning -EFAULT for errors, so that it
can be safely used from page_mapped_in_vma() and page_address_in_vma()
too.

Add vma_address_end() to apply similar care to end address calculation,
in page_vma_mapped_walk() and page_mkclean_one() and try_to_unmap_one();
though it raises a question of whether callers would do better to
supply pvmw->end to page_vma_mapped_walk() - I chose not, for a smaller
patch.

An irritation is that their apparent generality breaks down on KSM
pages, which cannot be located by the page->index that page_to_pgoff()
uses: as commit 4b0ece6fa0 ("mm: migrate: fix remove_migration_pte()
for ksm pages") once discovered.  I dithered over the best thing to do
about that, and have ended up with a VM_BUG_ON_PAGE(PageKsm) in both
vma_address() and vma_address_end(); though the only place in danger of
using it on them was try_to_unmap_one().

Sidenote: vma_address() and vma_address_end() now use compound_nr() on
a head page, instead of thp_size(): to make the right calculation on a
hugetlbfs page, whether or not THPs are configured.  try_to_unmap() is
used on hugetlbfs pages, but perhaps the wrong calculation never
mattered.

Link: https://lkml.kernel.org/r/caf1c1a3-7cfb-7f8f-1beb-ba816e932825@google.com
Fixes: a8fa41ad2f ("mm, rmap: check all VMAs that PTE-mapped THP can be part of")
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jue Wang <juew@google.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Wang Yugui <wangyugui@e16-tech.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent 66be14a926
commit 37ffe9f4d7
mm/internal.h
@@ -379,27 +379,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
- * At what user virtual address is page expected in @vma?
+ * At what user virtual address is page expected in vma?
+ * Returns -EFAULT if all of the page is outside the range of vma.
+ * If page is a compound head, the entire compound page is considered.
  */
 static inline unsigned long
-__vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page_to_pgoff(page);
-	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	pgoff_t pgoff;
+	unsigned long address;
+
+	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
+	pgoff = page_to_pgoff(page);
+	if (pgoff >= vma->vm_pgoff) {
+		address = vma->vm_start +
+			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+		/* Check for address beyond vma (or wrapped through 0?) */
+		if (address < vma->vm_start || address >= vma->vm_end)
+			address = -EFAULT;
+	} else if (PageHead(page) &&
+		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+		/* Test above avoids possibility of wrap to 0 on 32-bit */
+		address = vma->vm_start;
+	} else {
+		address = -EFAULT;
+	}
+	return address;
 }
 
+/*
+ * Then at what user virtual address will none of the page be found in vma?
+ * Assumes that vma_address() already returned a good starting address.
+ * If page is a compound head, the entire compound page is considered.
+ */
 static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_address_end(struct page *page, struct vm_area_struct *vma)
 {
-	unsigned long start, end;
-
-	start = __vma_address(page, vma);
-	end = start + thp_size(page) - PAGE_SIZE;
-
-	/* page should be within @vma mapping range */
-	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
-
-	return max(start, vma->vm_start);
+	pgoff_t pgoff;
+	unsigned long address;
+
+	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
+	pgoff = page_to_pgoff(page) + compound_nr(page);
+	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	/* Check for address beyond vma (or wrapped through 0?) */
+	if (address < vma->vm_start || address > vma->vm_end)
+		address = vma->vm_end;
+	return address;
 }
 
 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
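A hedged user-space re-run of the new vma_address() logic shows the two
interesting outcomes: clamping to vm_start when a compound head page
straddles the start of the vma, and -EFAULT instead of a wrapped
address when the page lies wholly outside. fake_vma and
fake_vma_address() are stand-ins, nr_pages replaces compound_nr(), and
the PageHead/PageKsm checks are elided:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define EFAULT 14

	struct fake_vma { unsigned long vm_start, vm_end, vm_pgoff; };

	/* mirrors the rewritten vma_address(); nr_pages stands in for
	 * compound_nr(page), PageHead/PageKsm checks elided */
	static unsigned long fake_vma_address(unsigned long pgoff,
			unsigned long nr_pages, const struct fake_vma *vma)
	{
		unsigned long address;

		if (pgoff >= vma->vm_pgoff) {
			address = vma->vm_start +
				((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
			/* check for address beyond vma (or wrapped through 0) */
			if (address < vma->vm_start || address >= vma->vm_end)
				address = -EFAULT;
		} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
			/* head page straddles vm_start: clamp, don't wrap */
			address = vma->vm_start;
		} else {
			address = -EFAULT;
		}
		return address;
	}

	int main(void)
	{
		/* 512 pages mapped at 0x1000, starting at file page 0x200 */
		struct fake_vma vma = { 0x1000, 0x201000, 0x200 };

		/* 512-page head page at pgoff 0x1f0 straddles vm_start:
		 * prints 0x1000 instead of a wrapped address */
		printf("%#lx\n", fake_vma_address(0x1f0, 512, &vma));

		/* single page wholly below the vma: -EFAULT */
		printf("%#lx\n", fake_vma_address(0x100, 1, &vma));
		return 0;
	}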
mm/page_vma_mapped.c
@@ -227,18 +227,18 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (!map_pte(pvmw))
 		goto next_pte;
 	while (1) {
+		unsigned long end;
+
 		if (check_pte(pvmw))
 			return true;
next_pte:
 		/* Seek to next pte only makes sense for THP */
 		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
 			return not_found(pvmw);
+		end = vma_address_end(pvmw->page, pvmw->vma);
 		do {
 			pvmw->address += PAGE_SIZE;
-			if (pvmw->address >= pvmw->vma->vm_end ||
-			    pvmw->address >=
-					__vma_address(pvmw->page, pvmw->vma) +
-					thp_size(pvmw->page))
+			if (pvmw->address >= end)
 				return not_found(pvmw);
 			/* Did we cross page table boundary? */
 			if (pvmw->address % PMD_SIZE == 0) {
@@ -276,14 +276,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.vma = vma,
 		.flags = PVMW_SYNC,
 	};
-	unsigned long start, end;
-
-	start = __vma_address(page, vma);
-	end = start + thp_size(page) - PAGE_SIZE;
 
-	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
+	pvmw.address = vma_address(page, vma);
+	if (pvmw.address == -EFAULT)
 		return 0;
-	pvmw.address = max(start, vma->vm_start);
 	if (!page_vma_mapped_walk(&pvmw))
 		return 0;
 	page_vma_mapped_walk_done(&pvmw);
mm/rmap.c
@@ -700,7 +700,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*
@@ -715,10 +714,8 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	address = __vma_address(page, vma);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
-		return -EFAULT;
-	return address;
+
+	return vma_address(page, vma);
 }
 
 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
@@ -912,7 +909,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address + page_size(page)));
+				vma_address_end(page, vma));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1415,9 +1412,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
+	range.end = PageKsm(page) ?
+			address + PAGE_SIZE : vma_address_end(page, vma);
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
-				address,
-				min(vma->vm_end, address + page_size(page)));
+				address, range.end);
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
@@ -1869,6 +1867,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
 
+		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
 
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
@@ -1923,6 +1922,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 			pgoff_start, pgoff_end) {
 		unsigned long address = vma_address(page, vma);
 
+		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
 
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))