mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
50f5aa8a9b
BUG_ON() is a big hammer, and should be used _only_ if there is some major corruption that you cannot possibly recover from, making it imperative that the current process (and possibly the whole machine) be terminated with extreme prejudice. The trivial sanity check in the vmacache code is *not* such a fatal error. Recovering from it is absolutely trivial, and using BUG_ON() just makes it harder to debug for no actual advantage. To make matters worse, the placement of the BUG_ON() (only if the range check matched) actually makes it harder to hit the sanity check to begin with, so _if_ there is a bug (and we just got a report from Srivatsa Bhat that this can indeed trigger), it is harder to debug not just because the machine is possibly dead, but because we don't have better coverage. BUG_ON() must *die*. Maybe we should add a checkpatch warning for it, because it is simply just about the worst thing you can ever do if you hit some "this cannot happen" situation. Reported-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> Cc: Davidlohr Bueso <davidlohr@hp.com> Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
115 lines
2.6 KiB
C
115 lines
2.6 KiB
C
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
/*
|
|
* Flush vma caches for threads that share a given mm.
|
|
*
|
|
* The operation is safe because the caller holds the mmap_sem
|
|
* exclusively and other threads accessing the vma cache will
|
|
* have mmap_sem held at least for read, so no extra locking
|
|
* is required to maintain the vma cache.
|
|
*/
|
|
void vmacache_flush_all(struct mm_struct *mm)
|
|
{
|
|
struct task_struct *g, *p;
|
|
|
|
rcu_read_lock();
|
|
for_each_process_thread(g, p) {
|
|
/*
|
|
* Only flush the vmacache pointers as the
|
|
* mm seqnum is already set and curr's will
|
|
* be set upon invalidation when the next
|
|
* lookup is done.
|
|
*/
|
|
if (mm == p->mm)
|
|
vmacache_flush(p);
|
|
}
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
/*
|
|
* This task may be accessing a foreign mm via (for example)
|
|
* get_user_pages()->find_vma(). The vmacache is task-local and this
|
|
* task's vmacache pertains to a different mm (ie, its own). There is
|
|
* nothing we can do here.
|
|
*
|
|
* Also handle the case where a kernel thread has adopted this mm via use_mm().
|
|
* That kernel thread's vmacache is not applicable to this mm.
|
|
*/
|
|
static bool vmacache_valid_mm(struct mm_struct *mm)
|
|
{
|
|
return current->mm == mm && !(current->flags & PF_KTHREAD);
|
|
}
|
|
|
|
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
|
|
{
|
|
if (vmacache_valid_mm(newvma->vm_mm))
|
|
current->vmacache[VMACACHE_HASH(addr)] = newvma;
|
|
}
|
|
|
|
static bool vmacache_valid(struct mm_struct *mm)
|
|
{
|
|
struct task_struct *curr;
|
|
|
|
if (!vmacache_valid_mm(mm))
|
|
return false;
|
|
|
|
curr = current;
|
|
if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
|
|
/*
|
|
* First attempt will always be invalid, initialize
|
|
* the new cache for this task here.
|
|
*/
|
|
curr->vmacache_seqnum = mm->vmacache_seqnum;
|
|
vmacache_flush(curr);
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
int i;
|
|
|
|
if (!vmacache_valid(mm))
|
|
return NULL;
|
|
|
|
for (i = 0; i < VMACACHE_SIZE; i++) {
|
|
struct vm_area_struct *vma = current->vmacache[i];
|
|
|
|
if (!vma)
|
|
continue;
|
|
if (WARN_ON_ONCE(vma->vm_mm != mm))
|
|
break;
|
|
if (vma->vm_start <= addr && vma->vm_end > addr)
|
|
return vma;
|
|
}
|
|
|
|
return NULL;
|
|
}
|
|
|
|
#ifndef CONFIG_MMU
|
|
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
|
|
unsigned long start,
|
|
unsigned long end)
|
|
{
|
|
int i;
|
|
|
|
if (!vmacache_valid(mm))
|
|
return NULL;
|
|
|
|
for (i = 0; i < VMACACHE_SIZE; i++) {
|
|
struct vm_area_struct *vma = current->vmacache[i];
|
|
|
|
if (vma && vma->vm_start == start && vma->vm_end == end)
|
|
return vma;
|
|
}
|
|
|
|
return NULL;
|
|
}
|
|
#endif
|