Merge branch 'vm' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull munmap/truncate race fixes from Al Viro:
 "Fixes for racy use of unmap_vmas() on truncate-related codepaths"

* 'vm' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  VM: make zap_page_range() callers that act on a single VMA use separate helper
  VM: make unmap_vmas() return void
  VM: don't bother with feeding upper limit to tlb_finish_mmu() in exit_mmap()
  VM: make zap_page_range() return void
  VM: can't go through the inner loop in unmap_vmas() more than once...
  VM: unmap_page_range() can return void
commit 3a990a52f9
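
Taken together, the series removes the return values of unmap_vmas() and zap_page_range() (no caller consumed them) and gives single-VMA callers a dedicated helper so they no longer go through the VMA-list walker. The standalone sketch below illustrates that refactoring pattern only; it is plain userspace C with made-up names (struct range, zap_one, zap_all), not kernel code:

/* Illustrative analogue of the unmap_vmas()/unmap_single_vma() split:
 * the per-element work is pulled out of the list walker, the walker
 * returns void, and single-element callers use the helper directly. */
#include <stdio.h>
#include <stddef.h>

struct range {
        unsigned long start, end;
        struct range *next;
};

/* Per-element helper (analogue of unmap_single_vma()): clamps the
 * element to [start_addr, end_addr) and does nothing if they miss. */
static void zap_one(struct range *r, unsigned long start_addr,
                    unsigned long end_addr)
{
        unsigned long start = r->start > start_addr ? r->start : start_addr;
        unsigned long end = r->end < end_addr ? r->end : end_addr;

        if (start >= end)
                return;
        printf("zap [%lu, %lu)\n", start, end);
}

/* List walker (analogue of unmap_vmas()): now returns void and its body
 * is just "call the helper for each element that overlaps the range". */
static void zap_all(struct range *head, unsigned long start_addr,
                    unsigned long end_addr)
{
        struct range *r;

        for (r = head; r && r->start < end_addr; r = r->next)
                zap_one(r, start_addr, end_addr);
}

int main(void)
{
        struct range c = { 40, 50, NULL };
        struct range b = { 20, 30, &c };
        struct range a = {  0, 10, &b };

        zap_all(&a, 5, 45);     /* multi-element caller walks the list */
        zap_one(&b, 0, ~0UL);   /* single-element caller skips the walk */
        return 0;
}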
include/linux/mm.h

@@ -893,9 +893,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 
 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                 unsigned long size);
-unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
                 unsigned long size, struct zap_details *);
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
                 struct vm_area_struct *start_vma, unsigned long start_addr,
                 unsigned long end_addr, unsigned long *nr_accounted,
                 struct zap_details *);
mm/memory.c (133 changed lines)
@@ -1282,10 +1282,10 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
         return addr;
 }
 
-static unsigned long unmap_page_range(struct mmu_gather *tlb,
-                        struct vm_area_struct *vma,
-                        unsigned long addr, unsigned long end,
-                        struct zap_details *details)
+static void unmap_page_range(struct mmu_gather *tlb,
+                        struct vm_area_struct *vma,
+                        unsigned long addr, unsigned long end,
+                        struct zap_details *details)
 {
         pgd_t *pgd;
         unsigned long next;
@@ -1305,8 +1305,47 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
         } while (pgd++, addr = next, addr != end);
         tlb_end_vma(tlb, vma);
         mem_cgroup_uncharge_end();
-
-        return addr;
 }
 
+static void unmap_single_vma(struct mmu_gather *tlb,
+                struct vm_area_struct *vma, unsigned long start_addr,
+                unsigned long end_addr, unsigned long *nr_accounted,
+                struct zap_details *details)
+{
+        unsigned long start = max(vma->vm_start, start_addr);
+        unsigned long end;
+
+        if (start >= vma->vm_end)
+                return;
+        end = min(vma->vm_end, end_addr);
+        if (end <= vma->vm_start)
+                return;
+
+        if (vma->vm_flags & VM_ACCOUNT)
+                *nr_accounted += (end - start) >> PAGE_SHIFT;
+
+        if (unlikely(is_pfn_mapping(vma)))
+                untrack_pfn_vma(vma, 0, 0);
+
+        if (start != end) {
+                if (unlikely(is_vm_hugetlb_page(vma))) {
+                        /*
+                         * It is undesirable to test vma->vm_file as it
+                         * should be non-null for valid hugetlb area.
+                         * However, vm_file will be NULL in the error
+                         * cleanup path of do_mmap_pgoff. When
+                         * hugetlbfs ->mmap method fails,
+                         * do_mmap_pgoff() nullifies vma->vm_file
+                         * before calling this function to clean up.
+                         * Since no pte has actually been setup, it is
+                         * safe to do nothing in this case.
+                         */
+                        if (vma->vm_file)
+                                unmap_hugepage_range(vma, start, end, NULL);
+                } else
+                        unmap_page_range(tlb, vma, start, end, details);
+        }
+}
+
 /**
@@ -1318,8 +1357,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the end address of the unmapping (restart addr if interrupted).
- *
  * Unmap all pages in the vma list.
  *
  * Only addresses between `start' and `end' will be unmapped.
@@ -1331,55 +1368,18 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb,
                 struct vm_area_struct *vma, unsigned long start_addr,
                 unsigned long end_addr, unsigned long *nr_accounted,
                 struct zap_details *details)
 {
-        unsigned long start = start_addr;
         struct mm_struct *mm = vma->vm_mm;
 
         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
-        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-                unsigned long end;
-
-                start = max(vma->vm_start, start_addr);
-                if (start >= vma->vm_end)
-                        continue;
-                end = min(vma->vm_end, end_addr);
-                if (end <= vma->vm_start)
-                        continue;
-
-                if (vma->vm_flags & VM_ACCOUNT)
-                        *nr_accounted += (end - start) >> PAGE_SHIFT;
-
-                if (unlikely(is_pfn_mapping(vma)))
-                        untrack_pfn_vma(vma, 0, 0);
-
-                while (start != end) {
-                        if (unlikely(is_vm_hugetlb_page(vma))) {
-                                /*
-                                 * It is undesirable to test vma->vm_file as it
-                                 * should be non-null for valid hugetlb area.
-                                 * However, vm_file will be NULL in the error
-                                 * cleanup path of do_mmap_pgoff. When
-                                 * hugetlbfs ->mmap method fails,
-                                 * do_mmap_pgoff() nullifies vma->vm_file
-                                 * before calling this function to clean up.
-                                 * Since no pte has actually been setup, it is
-                                 * safe to do nothing in this case.
-                                 */
-                                if (vma->vm_file)
-                                        unmap_hugepage_range(vma, start, end, NULL);
-
-                                start = end;
-                        } else
-                                start = unmap_page_range(tlb, vma, start, end, details);
-                }
-        }
-
+        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+                unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
+                                 details);
         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
-        return start;   /* which is now the end (or restart) address */
 }
 
 /**
@@ -1388,8 +1388,10 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
  * @address: starting address of pages to zap
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * Caller must protect the VMA list
  */
-unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
                 unsigned long size, struct zap_details *details)
 {
         struct mm_struct *mm = vma->vm_mm;
@@ -1400,9 +1402,34 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
         lru_add_drain();
         tlb_gather_mmu(&tlb, mm, 0);
         update_hiwater_rss(mm);
-        end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
+        unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
         tlb_finish_mmu(&tlb, address, end);
-        return end;
+}
+
+/**
+ * zap_page_range_single - remove user pages in a given range
+ * @vma: vm_area_struct holding the applicable pages
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ * @details: details of nonlinear truncation or shared cache invalidation
+ *
+ * The range must fit into one VMA.
+ */
+static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+                unsigned long size, struct zap_details *details)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        struct mmu_gather tlb;
+        unsigned long end = address + size;
+        unsigned long nr_accounted = 0;
+
+        lru_add_drain();
+        tlb_gather_mmu(&tlb, mm, 0);
+        update_hiwater_rss(mm);
+        mmu_notifier_invalidate_range_start(mm, address, end);
+        unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+        mmu_notifier_invalidate_range_end(mm, address, end);
+        tlb_finish_mmu(&tlb, address, end);
 }
 
 /**
@@ -1423,7 +1450,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
         if (address < vma->vm_start || address + size > vma->vm_end ||
                         !(vma->vm_flags & VM_PFNMAP))
                 return -1;
-        zap_page_range(vma, address, size, NULL);
+        zap_page_range_single(vma, address, size, NULL);
         return 0;
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
@@ -2770,7 +2797,7 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
                 unsigned long start_addr, unsigned long end_addr,
                 struct zap_details *details)
 {
-        zap_page_range(vma, start_addr, end_addr - start_addr, details);
+        zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
 }
 
 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
mm/mmap.c

@@ -2237,7 +2237,6 @@ void exit_mmap(struct mm_struct *mm)
         struct mmu_gather tlb;
         struct vm_area_struct *vma;
         unsigned long nr_accounted = 0;
-        unsigned long end;
 
         /* mm's last user has gone, and its about to be pulled down */
         mmu_notifier_release(mm);
@@ -2262,11 +2261,11 @@ void exit_mmap(struct mm_struct *mm)
         tlb_gather_mmu(&tlb, mm, 1);
         /* update_hiwater_rss(mm) here? but nobody should be looking */
         /* Use -1 here to ensure all VMAs in the mm are unmapped */
-        end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+        unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
         vm_unacct_memory(nr_accounted);
 
         free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
-        tlb_finish_mmu(&tlb, 0, end);
+        tlb_finish_mmu(&tlb, 0, -1);
 
         /*
          * Walk the list again, actually closing and freeing it,
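
Both zap_vma_ptes() and unmap_mapping_range_vma() now reach a single VMA through zap_page_range_single(), which, as the mm/memory.c hunks above show, wraps unmap_single_vma() in an mmu_notifier_invalidate_range_start()/end() pair inside a tlb_gather_mmu()/tlb_finish_mmu() pair. The standalone sketch below mirrors only that call ordering; the stub functions are illustrative and are not kernel APIs:

/* Ordering sketch for the new zap_page_range_single() path: gather TLB
 * state, bracket the per-VMA unmap with notifier start/end, then flush.
 * Stubs only; compiles and runs as ordinary userspace C. */
#include <stdio.h>

static void gather_begin(void)   { puts("tlb_gather_mmu-like setup"); }
static void notifier_start(void) { puts("invalidate_range_start"); }
static void unmap_one_vma(void)  { puts("unmap the single VMA"); }
static void notifier_end(void)   { puts("invalidate_range_end"); }
static void gather_finish(void)  { puts("tlb_finish_mmu-like flush"); }

/* The notifier bracket wraps only the per-VMA work; the gather/flush
 * pair wraps the whole operation, as in the hunk above. */
static void zap_single_range(void)
{
        gather_begin();
        notifier_start();
        unmap_one_vma();
        notifier_end();
        gather_finish();
}

int main(void)
{
        zap_single_range();
        return 0;
}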