f808c13fd3
Allow interval trees to quickly check for overlaps to avoid unnecessary tree lookups in interval_tree_iter_first(). As of this patch, all interval tree flavors will require using a 'rb_root_cached' such that we can have the leftmost node easily available. While most users will make use of this feature, those with special functions (in addition to the generic insert, delete, search calls) will avoid using the cached option as they can do funky things with insertions -- for example, vma_interval_tree_insert_after().

[jglisse@redhat.com: fix deadlock from typo vm_lock_anon_vma()]
Link: http://lkml.kernel.org/r/20170808225719.20723-1-jglisse@redhat.com
Link: http://lkml.kernel.org/r/20170719014603.19029-12-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Doug Ledford <dledford@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
113 lines
3.2 KiB
C
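The key idea in the commit above is that the tree root now also remembers its leftmost (smallest-start) node, so interval_tree_iter_first() can reject a query whose range ends before that node even begins, without walking the tree. Below is a minimal, self-contained userspace sketch of that fast path; the names (toy_node, toy_root, toy_search, toy_iter_first) and the naive search are made up for illustration and merely stand in for the kernel's rb_root_cached plus augmented rbtree machinery.

/* toy_overlap.c - illustration only, not part of the kernel tree */
#include <stdio.h>
#include <stddef.h>

/* Toy interval node: covers [start, last], both inclusive. */
struct toy_node {
        unsigned long start, last;
        struct toy_node *left, *right;
};

/* Root that also caches the node with the smallest start, the way
 * rb_root_cached caches rb_leftmost. */
struct toy_root {
        struct toy_node *root;
        struct toy_node *leftmost;
};

/* Exhaustive stand-in for the augmented rbtree descent done by the real iterator. */
static struct toy_node *toy_search(struct toy_node *n,
                                   unsigned long start, unsigned long last)
{
        struct toy_node *m;

        if (!n)
                return NULL;
        /* [a0,a1] and [b0,b1] overlap iff a0 <= b1 && b0 <= a1 */
        if (n->start <= last && start <= n->last)
                return n;
        m = toy_search(n->left, start, last);
        return m ? m : toy_search(n->right, start, last);
}

/* Return any node overlapping [start, last], or NULL. */
static struct toy_node *toy_iter_first(struct toy_root *r,
                                       unsigned long start, unsigned long last)
{
        if (!r->root)
                return NULL;
        /*
         * Fast path enabled by the cached leftmost node: if even the
         * smallest start in the tree lies beyond the end of the query
         * range, nothing can overlap, so skip the descent entirely.
         */
        if (r->leftmost->start > last)
                return NULL;
        return toy_search(r->root, start, last);
}

int main(void)
{
        struct toy_node a = { .start = 100, .last = 199, .left = NULL, .right = NULL };
        struct toy_root r = { .root = &a, .leftmost = &a };

        printf("[0,50]    overlaps? %d\n", toy_iter_first(&r, 0, 50) != NULL);    /* 0: fast path */
        printf("[150,300] overlaps? %d\n", toy_iter_first(&r, 150, 300) != NULL); /* 1 */
        return 0;
}

The kernel file below defines the two interval-tree flavours that rely on this: the shared file-mapping tree (mapping->i_mmap) and the anon_vma chain tree.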
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 *
 * This file is released under the GPL v2.
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

/*
 * A VMA is keyed on the range of page offsets it covers within the
 * mapping: [vm_pgoff, vm_pgoff + number_of_pages - 1], both inclusive.
 */
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff;
}

static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
}

/*
 * Generate vma_interval_tree_{insert,remove,iter_first,iter_next}() and the
 * vma_interval_tree_augment callbacks, keyed on [vma_start_pgoff, vma_last_pgoff],
 * linked through shared.rb, with shared.rb_subtree_last as the augmented
 * subtree maximum.  The empty ITSTATIC argument (the double comma) makes the
 * generated functions externally visible.
 */
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
                     unsigned long, shared.rb_subtree_last,
                     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)

/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                                    struct vm_area_struct *prev,
                                    struct rb_root_cached *root)
{
        struct rb_node **link;
        struct vm_area_struct *parent;
        unsigned long last = vma_last_pgoff(node);

        VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

        /*
         * node sorts equal to prev, so link it in as prev's in-order
         * successor: either as prev's (empty) right child, or as the
         * leftmost node of prev's right subtree.  Propagate node's last
         * pgoff into rb_subtree_last on the way down.
         */
        if (!prev->shared.rb.rb_right) {
                parent = prev;
                link = &prev->shared.rb.rb_right;
        } else {
                parent = rb_entry(prev->shared.rb.rb_right,
                                  struct vm_area_struct, shared.rb);
                if (parent->shared.rb_subtree_last < last)
                        parent->shared.rb_subtree_last = last;
                while (parent->shared.rb.rb_left) {
                        parent = rb_entry(parent->shared.rb.rb_left,
                                          struct vm_area_struct, shared.rb);
                        if (parent->shared.rb_subtree_last < last)
                                parent->shared.rb_subtree_last = last;
                }
                link = &parent->shared.rb.rb_left;
        }

        node->shared.rb_subtree_last = last;
        rb_link_node(&node->shared.rb, &parent->shared.rb, link);
        rb_insert_augmented(&node->shared.rb, &root->rb_root,
                            &vma_interval_tree_augment);
}

static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
        return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
        return vma_last_pgoff(avc->vma);
}

/*
 * The anon_vma flavour is generated static inline (__anon_vma_interval_tree_*)
 * and wrapped below, keeping the expansion local to this file and leaving room
 * for the CONFIG_DEBUG_VM_RB bookkeeping in the insert path.
 */
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
                     avc_start_pgoff, avc_last_pgoff,
                     static inline, __anon_vma_interval_tree)

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
        /*
         * Remember the interval at insertion time so that
         * anon_vma_interval_tree_verify() can warn if the VMA's range
         * changes while it is still linked in the tree.
         */
        node->cached_vma_start = avc_start_pgoff(node);
        node->cached_vma_last = avc_last_pgoff(node);
#endif
        __anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                   struct rb_root_cached *root)
{
        __anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
                                  unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
                                 unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_next(node, first, last);
}

#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
        WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
        WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif