mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-26 03:25:20 +07:00
e8ba2906f6
Commit e5adfc3e7e ("perf map: Synthesize maps only for thread group leader") changed the recording side so that we no longer get mmap events for threads other than the thread group leader (when synthesising these events for threads which exist before perf is started).

When a file recorded after this change is loaded, the lack of mmap records means that unwinding is not set up for any other threads.

This can be seen in a simple record/report scenario:

    perf record --call-graph=dwarf -t $TID
    perf report

If $TID is a process ID then the report will show call graphs, but if $TID is a secondary thread the output is as if --call-graph=none was specified.

Following the rationale in that commit, move the libunwind fields into struct map_groups and update the libunwind functions to take this instead of the struct thread. This is only required for unwind__finish_access, which must now be called from map_groups__delete; the others are changed for symmetry.

Note that unwind__get_entries keeps the thread argument since it is required for symbol lookup, and the libdw unwind provider uses the thread ID.

Signed-off-by: John Keeping <john@metanate.com>
Reviewed-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Fixes: e5adfc3e7e ("perf map: Synthesize maps only for thread group leader")
Link: http://lkml.kernel.org/r/20190815100146.28842-2-john@metanate.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
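To make the shape of the fix concrete, here is a minimal sketch. Only the facts stated above come from the commit message (the libunwind state moves into struct map_groups, and unwind__finish_access is now called from map_groups__delete); the exact field names and the surrounding code in this sketch are assumptions, not the upstream implementation.

/*
 * Illustrative sketch only, not the exact upstream code. Because all
 * threads of a process share one map_groups, keeping the libunwind
 * state here restores unwinding for secondary threads.
 */
#include <stdlib.h>		/* free() */
#include "unwind.h"		/* unwind__finish_access(); the header shown below */

struct map_groups {
	/* ... existing fields (maps, machine, refcount, ...) ... */
#ifdef HAVE_LIBUNWIND_SUPPORT
	void				*addr_space;		/* assumed field name */
	struct unwind_libunwind_ops	*unwind_libunwind_ops;	/* assumed field name */
#endif
};

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	unwind__finish_access(mg);	/* previously done per struct thread */
	free(mg);
}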
87 lines · 2.3 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UNWIND_H
#define __UNWIND_H

#include <linux/compiler.h>
#include <linux/types.h>

struct map;
struct map_groups;
struct perf_sample;
struct symbol;
struct thread;

/* One resolved stack frame: the map and symbol covering the instruction
 * pointer (either may be NULL if resolution failed). */
struct unwind_entry {
	struct map	*map;
	struct symbol	*sym;
	u64		ip;
};

/* Invoked for each unwound frame; returning non-zero stops the walk. */
typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);

/* Backend operations for the libunwind-based unwinder, attached per map_groups. */
struct unwind_libunwind_ops {
	int (*prepare_access)(struct map_groups *mg);
	void (*flush_access)(struct map_groups *mg);
	void (*finish_access)(struct map_groups *mg);
	int (*get_entries)(unwind_entry_cb_t cb, void *arg,
			   struct thread *thread,
			   struct perf_sample *data, int max_stack);
};

#ifdef HAVE_DWARF_UNWIND_SUPPORT
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack);
/* libunwind specific */
#ifdef HAVE_LIBUNWIND_SUPPORT
#ifndef LIBUNWIND__ARCH_REG_ID
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
#endif

#ifndef LIBUNWIND__ARCH_REG_SP
#define LIBUNWIND__ARCH_REG_SP PERF_REG_SP
#endif

#ifndef LIBUNWIND__ARCH_REG_IP
#define LIBUNWIND__ARCH_REG_IP PERF_REG_IP
#endif

int LIBUNWIND__ARCH_REG_ID(int regnum);
int unwind__prepare_access(struct map_groups *mg, struct map *map,
			   bool *initialized);
void unwind__flush_access(struct map_groups *mg);
void unwind__finish_access(struct map_groups *mg);
#else
/* No libunwind: the access setup/teardown helpers become no-op stubs. */
static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
					 struct map *map __maybe_unused,
					 bool *initialized __maybe_unused)
{
	return 0;
}

static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
#endif
#else
/* No DWARF unwind support at all: every helper becomes a no-op stub. */
static inline int
unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
		    void *arg __maybe_unused,
		    struct thread *thread __maybe_unused,
		    struct perf_sample *data __maybe_unused,
		    int max_stack __maybe_unused)
{
	return 0;
}

static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
					 struct map *map __maybe_unused,
					 bool *initialized __maybe_unused)
{
	return 0;
}

static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
#endif /* __UNWIND_H */
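For reference, a hedged sketch of how the interface declared above might be consumed. collect_frame() and report_frames() are hypothetical names invented for this example; only unwind__get_entries(), unwind_entry_cb_t and struct unwind_entry come from the header, and the sketch assumes it is built inside the perf tree where unwind.h is available.

/* Hypothetical caller, for illustration only. */
#include <stdio.h>
#include "unwind.h"	/* the header above */

/* unwind_entry_cb_t: called once per unwound frame; returning non-zero
 * stops the walk and is propagated back to the caller. */
static int collect_frame(struct unwind_entry *entry, void *arg)
{
	int *nr = arg;

	/* entry->ip is always set; entry->map and entry->sym may be NULL
	 * when the address could not be resolved (printing the symbol
	 * name would additionally need util/symbol.h). */
	printf("#%-2d 0x%016llx %s\n", (*nr)++,
	       (unsigned long long)entry->ip,
	       entry->sym ? "resolved symbol" : "[unresolved]");
	return 0;
}

static int report_frames(struct thread *thread, struct perf_sample *sample)
{
	int nr = 0;

	/* Unwinds the DWARF call chain recorded with the sample; the
	 * thread argument is kept for symbol lookup even though the
	 * libunwind state now lives in the thread's map_groups. */
	return unwind__get_entries(collect_frame, &nr, thread, sample,
				   127 /* example max_stack */);
}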