Commit e4dcad204d:

When a process updates the RSS of a different process, the rss_stat
tracepoint appears in the context of the process doing the update. This can
mislead userspace into thinking that the RSS of the process doing the update
changed, while in reality a different process's RSS was updated. This issue
happens in reclaim paths such as direct reclaim or background reclaim.

This patch adds more information to the tracepoint about whether the mm
being updated belongs to the current process's context (curr field). We also
include a hash of the mm pointer so that the process to which the mm belongs
can be uniquely identified (mm_id field).

Also, vsprintf.c is refactored a bit to allow reuse of the hashing code.

[akpm@linux-foundation.org: remove unused local `str']
[joelaf@google.com: inline call to ptr_to_hashval]
Link: http://lore.kernel.org/r/20191113153816.14b95acd@gandalf.local.home
Link: http://lkml.kernel.org/r/20191114164622.GC233237@google.com
Link: http://lkml.kernel.org/r/20191106024452.81923-1-joel@joelfernandes.org
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reported-by: Ioannis Ilkos <ilkos@google.com>
Acked-by: Petr Mladek <pmladek@suse.com> [lib/vsprintf.c]
Cc: Tim Murray <timmurray@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Carmen Jackson <carmenjackson@google.com>
Cc: Mayank Gupta <mayankgupta@google.com>
Cc: Daniel Colascione <dancol@google.com>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
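For readers consuming the new event from userspace, the sketch below is a minimal, illustrative parser for the rss_stat text output; it is not part of the kernel file. Only the field layout mm_id/curr/member/size comes from the TP_printk() format defined later in this header. The struct name, helper, and sample line are hypothetical, and a real consumer would read lines from tracefs (e.g. trace_pipe) after enabling the kmem/rss_stat event.

/*
 * Minimal userspace sketch (not kernel code): parse the rss_stat fields
 * from one line of ftrace text output. The "mm_id=%u curr=%d member=%d
 * size=%ldB" layout matches the TP_printk() in this header; the trace-line
 * prefix (task, CPU, timestamp) is skipped by searching for "mm_id=".
 */
#include <stdio.h>
#include <string.h>

struct rss_stat_event {
        unsigned int mm_id;   /* hash of the mm_struct pointer */
        int curr;             /* 1 if the mm belongs to the task doing the update */
        int member;           /* which RSS counter changed */
        long size;            /* new counter value, in bytes */
};

static int parse_rss_stat(const char *line, struct rss_stat_event *ev)
{
        const char *p = strstr(line, "mm_id=");

        if (!p)
                return -1;

        if (sscanf(p, "mm_id=%u curr=%d member=%d size=%ldB",
                   &ev->mm_id, &ev->curr, &ev->member, &ev->size) != 4)
                return -1;

        return 0;
}

int main(void)
{
        /* Hypothetical sample line; real output carries a trace prefix. */
        const char *line = "rss_stat: mm_id=2193045049 curr=0 member=1 size=446464B";
        struct rss_stat_event ev;

        if (!parse_rss_stat(line, &ev))
                printf("mm_id=%u curr=%d member=%d size=%ld bytes\n",
                       ev.mm_id, ev.curr, ev.member, ev.size);
        return 0;
}

Filtering on curr=0 and grouping by mm_id is how a consumer can attribute reclaim-driven RSS updates to the process that owns the mm rather than to the task performing the reclaim.
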
370 lines · 8.3 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

DECLARE_EVENT_CLASS(kmem_alloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
        ),

        TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                (void *)__entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DECLARE_EVENT_CLASS(kmem_alloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
                __field( int, node )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
                __entry->bytes_req = bytes_req;
                __entry->bytes_alloc = bytes_alloc;
                __entry->gfp_flags = gfp_flags;
                __entry->node = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DECLARE_EVENT_CLASS(kmem_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field( unsigned long, call_site )
                __field( const void *, ptr )
        ),

        TP_fast_assign(
                __entry->call_site = call_site;
                __entry->ptr = ptr;
        ),

        TP_printk("call_site=%pS ptr=%p",
                (void *)__entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr)
);

DEFINE_EVENT(kmem_free, kmem_cache_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr)
);

TRACE_EVENT(mm_page_free,

        TP_PROTO(struct page *page, unsigned int order),

        TP_ARGS(page, order),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
        ),

        TP_fast_assign(
                __entry->pfn = page_to_pfn(page);
                __entry->order = order;
        ),

        TP_printk("page=%p pfn=%lu order=%d",
                pfn_to_page(__entry->pfn),
                __entry->pfn,
                __entry->order)
);

TRACE_EVENT(mm_page_free_batched,

        TP_PROTO(struct page *page),

        TP_ARGS(page),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
        ),

        TP_fast_assign(
                __entry->pfn = page_to_pfn(page);
        ),

        TP_printk("page=%p pfn=%lu order=0",
                pfn_to_page(__entry->pfn),
                __entry->pfn)
);

TRACE_EVENT(mm_page_alloc,

        TP_PROTO(struct page *page, unsigned int order,
                 gfp_t gfp_flags, int migratetype),

        TP_ARGS(page, order, gfp_flags, migratetype),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
                __field( gfp_t, gfp_flags )
                __field( int, migratetype )
        ),

        TP_fast_assign(
                __entry->pfn = page ? page_to_pfn(page) : -1UL;
                __entry->order = order;
                __entry->gfp_flags = gfp_flags;
                __entry->migratetype = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
                __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
                __entry->pfn != -1UL ? __entry->pfn : 0,
                __entry->order,
                __entry->migratetype,
                show_gfp_flags(__entry->gfp_flags))
);

DECLARE_EVENT_CLASS(mm_page,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
                __field( int, migratetype )
        ),

        TP_fast_assign(
                __entry->pfn = page ? page_to_pfn(page) : -1UL;
                __entry->order = order;
                __entry->migratetype = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
                __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
                __entry->pfn != -1UL ? __entry->pfn : 0,
                __entry->order,
                __entry->migratetype,
                __entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype)
);

TRACE_EVENT(mm_page_pcpu_drain,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( unsigned int, order )
                __field( int, migratetype )
        ),

        TP_fast_assign(
                __entry->pfn = page ? page_to_pfn(page) : -1UL;
                __entry->order = order;
                __entry->migratetype = migratetype;
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
                pfn_to_page(__entry->pfn), __entry->pfn,
                __entry->order, __entry->migratetype)
);

TRACE_EVENT(mm_page_alloc_extfrag,

        TP_PROTO(struct page *page,
                 int alloc_order, int fallback_order,
                 int alloc_migratetype, int fallback_migratetype),

        TP_ARGS(page,
                alloc_order, fallback_order,
                alloc_migratetype, fallback_migratetype),

        TP_STRUCT__entry(
                __field( unsigned long, pfn )
                __field( int, alloc_order )
                __field( int, fallback_order )
                __field( int, alloc_migratetype )
                __field( int, fallback_migratetype )
                __field( int, change_ownership )
        ),

        TP_fast_assign(
                __entry->pfn = page_to_pfn(page);
                __entry->alloc_order = alloc_order;
                __entry->fallback_order = fallback_order;
                __entry->alloc_migratetype = alloc_migratetype;
                __entry->fallback_migratetype = fallback_migratetype;
                __entry->change_ownership = (alloc_migratetype ==
                                             get_pageblock_migratetype(page));
        ),

        TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
                pfn_to_page(__entry->pfn),
                __entry->pfn,
                __entry->alloc_order,
                __entry->fallback_order,
                pageblock_order,
                __entry->alloc_migratetype,
                __entry->fallback_migratetype,
                __entry->fallback_order < pageblock_order,
                __entry->change_ownership)
);

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
        int ret;
        unsigned long hashval;

        ret = ptr_to_hashval(ptr, &hashval);
        if (ret)
                return 0;

        /* The hashed value is only 32-bit */
        return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

/*
 * rss_stat can fire in the context of a task other than the one whose
 * counters changed (e.g. in reclaim paths). mm_id is a hash of the
 * mm_struct pointer so the owning process can still be identified, and
 * curr records whether the mm belongs to the current task.
 */
TRACE_EVENT(rss_stat,

        TP_PROTO(struct mm_struct *mm,
                 int member,
                 long count),

        TP_ARGS(mm, member, count),

        TP_STRUCT__entry(
                __field(unsigned int, mm_id)
                __field(unsigned int, curr)
                __field(int, member)
                __field(long, size)
        ),

        TP_fast_assign(
                __entry->mm_id = mm_ptr_to_hash(mm);
                __entry->curr = !!(current->mm == mm);
                __entry->member = member;
                __entry->size = (count << PAGE_SHIFT);
        ),

        TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
                __entry->mm_id,
                __entry->curr,
                __entry->member,
                __entry->size)
);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>