mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-22 14:21:55 +07:00)
commit 108a7ac448
page_ref_freeze and page_ref_unfreeze are designed to be used as a pair,
wrapping a critical section where struct pages can be modified without
having to worry about consistency for a concurrent fast-GUP.

Whilst page_ref_freeze has full barrier semantics due to its use of
atomic_cmpxchg, page_ref_unfreeze is implemented using atomic_set, which
doesn't provide any barrier semantics and allows the operation to be
reordered with respect to page modifications in the critical section.

This patch ensures that page_ref_unfreeze is ordered after any critical
section updates, by invoking smp_mb() prior to the atomic_set.

Link: http://lkml.kernel.org/r/1497349722-6731-3-git-send-email-will.deacon@arm.com
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Steve Capper <steve.capper@arm.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
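To make the pairing concrete, here is a minimal hypothetical caller (not part of the patch; the function name, the expected_refs parameter, and the mapping store are illustrative assumptions) showing the critical section the new barrier protects:

#include <linux/page_ref.h>

/*
 * Hypothetical caller sketch: freeze the refcount, modify the page,
 * then unfreeze.  expected_refs is the number of references the
 * caller knows it holds.
 */
static bool example_modify_frozen_page(struct page *page, int expected_refs)
{
	/* Full barrier via atomic_cmpxchg(); fails if other refs exist. */
	if (!page_ref_freeze(page, expected_refs))
		return false;

	/*
	 * Critical section: _refcount is zero, so a concurrent
	 * fast-GUP cannot obtain a new reference to the page while
	 * we modify it.
	 */
	page->mapping = NULL;	/* illustrative modification */

	/*
	 * With this patch, the smp_mb() in page_ref_unfreeze() orders
	 * the store above before the refcount becomes non-zero again.
	 */
	page_ref_unfreeze(page, expected_refs);
	return true;
}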
184 lines
5.0 KiB
C
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_refcount);
}

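/*
 * Unlike page_ref_count(), this always reads the refcount of the
 * compound head, so it returns the reference count of the whole
 * compound page even when given a tail page.
 */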
static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_refcount);
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

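/*
 * Freeze the refcount: drop it to zero iff it currently equals @count,
 * failing if any other references exist.  Per the commit message above,
 * the atomic_cmpxchg() gives this full barrier semantics.
 */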
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

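/*
 * Unfreeze the refcount.  The smp_mb() orders all modifications made
 * while the page was frozen before the atomic_set() that publishes the
 * new refcount, so a concurrent fast-GUP cannot observe a stale struct
 * page once it can take a reference again (the ordering this patch adds).
 */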
static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	smp_mb();
	atomic_set(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif