mm: replace vma_lock_anon_vma with anon_vma_lock_read/write
The sequence vma_lock_anon_vma() - vma_unlock_anon_vma() isn't safe if an anon_vma appeared between the lock and the unlock. We have to check anon_vma first, or call anon_vma_prepare(), to be sure that it's there. There are only a few users of these legacy helpers. Let's get rid of them.

This patch fixes the anon_vma lock imbalance in validate_mm(). A write lock isn't required here; a read lock is enough.

It also reorders expand_downwards()/expand_upwards(): security_mmap_addr() and the wrap-around check don't have to run under the anon_vma lock.

Link: https://lkml.kernel.org/r/CACT4Y+Y908EjM2z=706dv4rV6dWtxTLK9nFg9_7DhRMLppBo2g@mail.gmail.com
Signed-off-by: Konstantin Khlebnikov <koct9i@gmail.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent c95a51807b
commit 12352d3cae
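To illustrate the imbalance described in the message (a sketch for this write-up, not code from the patch): vma_lock_anon_vma() takes no lock while vma->anon_vma is still NULL, but if a racing page fault under the shared mmap_sem calls anon_vma_prepare() and installs vma->anon_vma, the paired vma_unlock_anon_vma() releases a rwsem that was never taken:

	vma_lock_anon_vma(vma);		/* vma->anon_vma == NULL: no lock taken */

	/* ... a racing anon_vma_prepare() installs vma->anon_vma here ... */

	vma_unlock_anon_vma(vma);	/* vma->anon_vma != NULL: up_write() on
					 * a rwsem we never down_write()'d */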
include/linux/rmap.h
@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
 		__put_anon_vma(anon_vma);
 }
 
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
 	down_write(&anon_vma->root->rwsem);
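With the legacy helpers gone, a caller loads vma->anon_vma once and locks that object directly, so the lock and unlock always act on the same anon_vma. A minimal sketch of the read-side pattern, mirroring the new validate_mm() code in the mm/mmap.c hunk below:

	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		anon_vma_lock_read(anon_vma);
		/* ... read-only walk, e.g. anon_vma_interval_tree_verify() ... */
		anon_vma_unlock_read(anon_vma);
	}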
mm/mmap.c (55 changed lines)
@@ -459,12 +459,16 @@ static void validate_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma = mm->mmap;
 
 	while (vma) {
+		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
 
-		vma_lock_anon_vma(vma);
-		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-			anon_vma_interval_tree_verify(avc);
-		vma_unlock_anon_vma(vma);
+		if (anon_vma) {
+			anon_vma_lock_read(anon_vma);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				anon_vma_interval_tree_verify(avc);
+			anon_vma_unlock_read(anon_vma);
+		}
+
 		highest_address = vma->vm_end;
 		vma = vma->vm_next;
 		i++;
@@ -2145,32 +2149,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int error;
+	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
+	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
-	error = 0;
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
@@ -2188,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2211,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;
@@ -2227,25 +2226,21 @@ int expand_downwards(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 	int error;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
-	if (unlikely(anon_vma_prepare(vma)))
-		return -ENOMEM;
-
 	address &= PAGE_MASK;
 	error = security_mmap_addr(address);
 	if (error)
 		return error;
 
-	vma_lock_anon_vma(vma);
+	/* We must make sure the anon_vma is allocated. */
+	if (unlikely(anon_vma_prepare(vma)))
+		return -ENOMEM;
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode.  We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -2263,7 +2258,7 @@ int expand_downwards(struct vm_area_struct *vma,
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2284,7 +2279,7 @@ int expand_downwards(struct vm_area_struct *vma,
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(mm);
 	return error;