linux_dsm_epyc7002/tools/perf/util/annotate.c

/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from builtin-annotate.c, see those files for further
* copyright notes.
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "config.h"
#include "cache.h"
#include "symbol.h"
#include "units.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include "evlist.h"
perf annotate: Add branch stack / basic block I wanted to know the hottest path through a function and figured the branch-stack (LBR) information should be able to help out with that. The below uses the branch-stack to create basic blocks and generate statistics from them. from to branch_i * ----> * | | block v * ----> * from to branch_i+1 The blocks are broken down into non-overlapping ranges, while tracking if the start of each range is an entry point and/or the end of a range is a branch. Each block iterates all ranges it covers (while splitting where required to exactly match the block) and increments the 'coverage' count. For the range including the branch we increment the taken counter, as well as the pred counter if flags.predicted. Using these number we can find if an instruction: - had coverage; given by: br->coverage / br->sym->max_coverage This metric ensures each symbol has a 100% spot, which reflects the observation that each symbol must have a most covered/hottest block. - is a branch target: br->is_target && br->start == add - for targets, how much of a branch's coverages comes from it: target->entry / branch->coverage - is a branch: br->is_branch && br->end == addr - for branches, how often it was taken: br->taken / br->coverage after all, all execution that didn't take the branch would have incremented the coverage and continued onward to a later branch. - for branches, how often it was predicted: br->pred / br->taken The coverage percentage is used to color the address and asm sections; for low (<1%) coverage we use NORMAL (uncolored), indicating that these instructions are not 'important'. For high coverage (>75%) we color the address RED. For each branch, we add an asm comment after the instruction with information on how often it was taken and predicted. 
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
#include <regex.h>
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
/*
* FIXME: Using the same values as slang.h,
* but that header may not be available everywhere
*/
#define LARROW_CHAR ((unsigned char)',')
#define RARROW_CHAR ((unsigned char)'+')
#define DARROW_CHAR ((unsigned char)'.')
#define UARROW_CHAR ((unsigned char)'-')
#include "sane_ctype.h"
struct annotation_options annotation__default_options = {
	.use_offset   = true,
	.jump_arrows  = true,
	.annotate_src = true,
	.offset_level = ANNOTATION__OFFSET_JUMP_TARGETS,
	.percent_type = PERCENT_PERIOD_LOCAL,
};
static regex_t file_lineno;
static struct ins_ops *ins__find(struct arch *arch, const char *name);
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);
struct arch {
	const char	*name;
	struct ins	*instructions;
	size_t		nr_instructions;
	size_t		nr_instructions_allocated;
	struct ins_ops	*(*associate_instruction_ops)(struct arch *arch, const char *name);
	bool		sorted_instructions;
	bool		initialized;
	void		*priv;
	unsigned int	model;
	unsigned int	family;
	int		(*init)(struct arch *arch, char *cpuid);
	bool		(*ins_is_fused)(struct arch *arch, const char *ins1,
					const char *ins2);
	struct		{
		char comment_char;
		char skip_functions_char;
	} objdump;
};
static struct ins_ops call_ops;
static struct ins_ops dec_ops;
static struct ins_ops jump_ops;
static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;
static int arch__grow_instructions(struct arch *arch)
{
	struct ins *new_instructions;
	size_t new_nr_allocated;

	/* A static (compile time) table moves to the heap on first growth. */
	if (arch->nr_instructions_allocated == 0 && arch->instructions)
		goto grow_from_non_allocated_table;

	new_nr_allocated = arch->nr_instructions_allocated + 128;
	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

out_update_instructions:
	arch->instructions = new_instructions;
	arch->nr_instructions_allocated = new_nr_allocated;
	return 0;

grow_from_non_allocated_table:
	new_nr_allocated = arch->nr_instructions + 128;
	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

	/* Copy whole entries, not just nr_instructions bytes. */
	memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
	goto out_update_instructions;
}
static int arch__associate_ins_ops(struct arch *arch, const char *name, struct ins_ops *ops)
{
	struct ins *ins;

	if (arch->nr_instructions == arch->nr_instructions_allocated &&
	    arch__grow_instructions(arch))
		return -1;

	ins = &arch->instructions[arch->nr_instructions];
	ins->name = strdup(name);
	if (!ins->name)
		return -1;

	ins->ops = ops;
	arch->nr_instructions++;

	ins__sort(arch);
	return 0;
}
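
/*
 * Example (a sketch, not part of this file): an arch that cannot list its
 * instructions statically can point ->associate_instruction_ops at a
 * callback that classifies a mnemonic on first sight and caches it via
 * arch__associate_ins_ops(). The "foo" arch and its rule that every
 * mnemonic starting with 'b' is a branch are hypothetical:
 *
 *	static struct ins_ops *foo__associate_ins_ops(struct arch *arch,
 *						      const char *name)
 *	{
 *		struct ins_ops *ops = NULL;
 *
 *		if (name[0] == 'b')	// hypothetical classification rule
 *			ops = &jump_ops;
 *
 *		if (ops)
 *			arch__associate_ins_ops(arch, name, ops);
 *
 *		return ops;
 *	}
 */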
#include "arch/arm/annotate/instructions.c"
#include "arch/arm64/annotate/instructions.c"
#include "arch/x86/annotate/instructions.c"
#include "arch/powerpc/annotate/instructions.c"
#include "arch/s390/annotate/instructions.c"
#include "arch/sparc/annotate/instructions.c"
static struct arch architectures[] = {
	{
		.name = "arm",
		.init = arm__annotate_init,
	},
	{
		.name = "arm64",
		.init = arm64__annotate_init,
	},
	{
		.name = "x86",
		.init = x86__annotate_init,
		.instructions = x86__instructions,
		.nr_instructions = ARRAY_SIZE(x86__instructions),
		.ins_is_fused = x86__ins_is_fused,
		.objdump = {
			.comment_char = '#',
		},
	},
	{
		.name = "powerpc",
		.init = powerpc__annotate_init,
	},
	{
		.name = "s390",
		.init = s390__annotate_init,
		.objdump = {
			.comment_char = '#',
		},
	},
	{
		.name = "sparc",
		.init = sparc__annotate_init,
		.objdump = {
			.comment_char = '#',
		},
	},
};
static void ins__delete(struct ins_operands *ops)
{
	if (ops == NULL)
		return;

	zfree(&ops->source.raw);
	zfree(&ops->source.name);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
			      struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
		   struct ins_operands *ops)
{
	if (ins->ops->scnprintf)
		return ins->ops->scnprintf(ins, bf, size, ops);

	return ins__raw_scnprintf(ins, bf, size, ops);
}
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
{
	if (!arch || !arch->ins_is_fused)
		return false;

	return arch->ins_is_fused(arch, ins1, ins2);
}
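
/*
 * Example (a sketch): on Intel CPUs that macro-fuse cmp/test + jcc into a
 * single micro-op, a caller rendering jump arrows may want to treat the
 * compare and its conditional jump as one location:
 *
 *	if (ins__is_fused(arch, "cmp", "je"))
 *		... start the arrow at the cmp line as well ...
 *
 * Arches that leave ->ins_is_fused NULL always get false here.
 */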
static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
	char *endptr, *tok, *name;
	struct map *map = ms->map;
	struct addr_map_symbol target = {
		.map = map,
	};

	ops->target.addr = strtoull(ops->raw, &endptr, 16);

	name = strchr(endptr, '<');
	if (name == NULL)
		goto indirect_call;

	name++;

	if (arch->objdump.skip_functions_char &&
	    strchr(name, arch->objdump.skip_functions_char))
		return -1;

	tok = strchr(name, '>');
	if (tok == NULL)
		return -1;

	*tok = '\0';
	ops->target.name = strdup(name);
	*tok = '>';

	if (ops->target.name == NULL)
		return -1;
find_target:
	target.addr = map__objdump_2mem(map, ops->target.addr);

	if (map_groups__find_ams(&target) == 0 &&
	    map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
		ops->target.sym = target.sym;

	return 0;

indirect_call:
	tok = strchr(endptr, '*');
	if (tok != NULL) {
		endptr++;

		/*
		 * An indirect call can use a non-rip register and offset,
		 * e.g. "callq *0x8(%rbx)". Do not parse such an instruction.
		 */
		if (strstr(endptr, "(%r") == NULL)
			ops->target.addr = strtoull(endptr, NULL, 16);
	}
	goto find_target;
}
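
/*
 * Examples of ops->raw handled above, as emitted by objdump:
 *
 *	400526 <lfsr>		direct call: target addr and name resolved
 *	*ffffffff8184dc80	indirect via absolute address: addr only
 *	*0x8(%rbx)		indirect via register + offset: left unparsed
 */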
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	if (ops->target.sym)
		return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);

	if (ops->target.addr == 0)
		return ins__raw_scnprintf(ins, bf, size, ops);
	if (ops->target.name)
		return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);

	return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
	.parse	   = call__parse,
	.scnprintf = call__scnprintf,
};
bool ins__is_call(const struct ins *ins)
{
	return ins->ops == &call_ops || ins->ops == &s390_call_ops;
}
/*
 * Prevents matching commas in the comment section, e.g.:
 * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
 */
static inline const char *validate_comma(const char *c, struct ins_operands *ops)
{
	if (ops->raw_comment && c > ops->raw_comment)
		return NULL;

	return c;
}
static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
	struct map *map = ms->map;
	struct symbol *sym = ms->sym;
	struct addr_map_symbol target = {
		.map = map,
	};
	const char *c = strchr(ops->raw, ',');
	u64 start, end;
	ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
	c = validate_comma(c, ops);

	/*
	 * Examples of lines to parse for the _cpp_lex_token@@Base
	 * function:
	 *
	 * 1159e6c: jne    115aa32 <_cpp_lex_token@@Base+0xf92>
	 * 1159e8b: jne    c469be <cpp_named_operator2name@@Base+0xa72>
	 *
	 * The first is a jump to an offset inside the same function,
	 * the second is to another function, i.e. that 0xa72 is an
	 * offset in the cpp_named_operator2name@@base function.
	 */
	/*
	 * Skip over possibly up to 2 operands to get to the address, e.g.:
	 * tbnz	 w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
	 */
	if (c++ != NULL) {
		ops->target.addr = strtoull(c, NULL, 16);
		if (!ops->target.addr) {
			c = strchr(c, ',');
			c = validate_comma(c, ops);
if (c++ != NULL)
ops->target.addr = strtoull(c, NULL, 16);
}
} else {
ops->target.addr = strtoull(ops->raw, NULL, 16);
}
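/*
 * Convert the parsed target from objdump's address space to the
 * memory address space, then compute the function's boundaries so
 * that jumps landing outside [start, end] can be flagged below.
 */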
target.addr = map__objdump_2mem(map, ops->target.addr);
start = map->unmap_ip(map, sym->start);
end = map->unmap_ip(map, sym->end);
ops->target.outside = target.addr < start || target.addr > end;
/*
* FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
* cpp_named_operator2name@@Base+0xa72
* Point to a place that is after the cpp_named_operator2name
* boundaries, i.e. in the ELF symbol table for cc1
* cpp_named_operator2name is marked as being 32-bytes long, but it in
* fact is much larger than that, so we seem to need a symbols__find()
* routine that looks for >= current->start and < next_symbol->start,
* possibly just for C++ objects?
*
* For now let's just make some progress by marking jumps to outside the
* current function as call like.
*
* Actual navigation will come next, with further understanding of how
* the symbol searching and disassembly should be done.
perf annotate: Support jumping from one function to another For instance: entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux 5.50 │ → callq do_syscall_64 14.56 │ mov 0x58(%rsp),%rcx 7.44 │ mov 0x80(%rsp),%r11 0.32 │ cmp %rcx,%r11 │ → jne swapgs_restore_regs_and_return_to_usermode 0.32 │ shl $0x10,%rcx 0.32 │ sar $0x10,%rcx 3.24 │ cmp %rcx,%r11 │ → jne swapgs_restore_regs_and_return_to_usermode 2.27 │ cmpq $0x33,0x88(%rsp) 1.29 │ → jne swapgs_restore_regs_and_return_to_usermode │ mov 0x30(%rsp),%r11 8.74 │ cmp %r11,0x90(%rsp) │ → jne swapgs_restore_regs_and_return_to_usermode 0.32 │ test $0x10100,%r11 │ → jne swapgs_restore_regs_and_return_to_usermode 0.32 │ cmpq $0x2b,0xa0(%rsp) 0.65 │ → jne swapgs_restore_regs_and_return_to_usermode It'll behave just like a "call" instruction, i.e. press enter or right arrow over one such line and the browser will navigate to the annotated disassembly of that function, which when exited, via left arrow or esc, will come back to the calling function. Now to support jump to an offset on a different function... Reported-by: Linus Torvalds <torvalds@linux-foundation.org> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: David Ahern <dsahern@gmail.com> Cc: Jin Yao <yao.jin@linux.intel.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Wang Nan <wangnan0@huawei.com> Link: https://lkml.kernel.org/n/tip-78o508mqvr8inhj63ddtw7mo@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-23 20:50:35 +07:00
*/
if (map_groups__find_ams(&target) == 0 &&
map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr)
ops->target.sym = target.sym;
perf annotate: Use absolute addresses to calculate jump target offsets These types of jumps were confusing the annotate browser: entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux Percent│ffffffff81a00020: swapgs <SNIP> │ffffffff81a00128: ↓ jae ffffffff81a00139 <syscall_return_via_sysret+0x53> <SNIP> │ffffffff81a00155: → jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8> I.e. the syscall_return_via_sysret function is actually "inside" the entry_SYSCALL_64 function, and the offsets in jumps like these (+0x53) are relative to syscall_return_via_sysret, not to syscall_return_via_sysret. Or this may be some artifact in how the assembler marks the start and end of a function and how this ends up in the ELF symtab for vmlinux, i.e. syscall_return_via_sysret() isn't "inside" entry_SYSCALL_64, but just right after it. From readelf -sw vmlinux: 80267: ffffffff81a00020 315 NOTYPE GLOBAL DEFAULT 1 entry_SYSCALL_64 316: ffffffff81a000e6 0 NOTYPE LOCAL DEFAULT 1 syscall_return_via_sysret 0xffffffff81a00020 + 315 > 0xffffffff81a000e6 So instead of looking for offsets after that last '+' sign, calculate offsets for jump target addresses that are inside the function being disassembled from the absolute address, 0xffffffff81a00139 in this case, subtracting from it the objdump address for the start of the function being disassembled, entry_SYSCALL_64() in this case. So, before this patch: entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux Percent│ pop %r10 │ pop %r9 │ pop %r8 │ pop %rax │ pop %rsi │ pop %rdx │ pop %rsi │ mov %rsp,%rdi │ mov %gs:0x5004,%rsp │ pushq 0x28(%rdi) │ pushq (%rdi) │ push %rax │ ↑ jmp 6c │ mov %cr3,%rdi │ ↑ jmp 62 │ mov %rdi,%rax │ and $0x7ff,%rdi │ bt %rdi,%gs:0x2219a │ ↑ jae 53 │ btr %rdi,%gs:0x2219a │ mov %rax,%rdi │ ↑ jmp 5b After: entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux 0.65 │ → jne swapgs_restore_regs_and_return_to_usermode │ pop %r10 │ pop %r9 │ pop %r8 │ pop %rax │ pop %rsi │ pop %rdx │ pop %rsi │ mov %rsp,%rdi │ mov %gs:0x5004,%rsp │ pushq 0x28(%rdi) │ pushq (%rdi) │ push %rax │ ↓ jmp 132 │ mov %cr3,%rdi │ ┌──jmp 128 │ │ mov %rdi,%rax │ │ and $0x7ff,%rdi │ │ bt %rdi,%gs:0x2219a │ │↓ jae 119 │ │ btr %rdi,%gs:0x2219a │ │ mov %rax,%rdi │ │↓ jmp 121 │119:│ mov %rax,%rdi │ │ bts $0x3f,%rdi │121:│ or $0x800,%rdi │128:└─→or $0x1000,%rdi │ mov %rdi,%cr3 │132: pop %rax │ pop %rdi │ pop %rsp │ → jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8> With those at least navigating to the right destination, an improvement for these cases seems to be to be to somehow mark those inner functions, which in this case could be: entry_SYSCALL_64 /lib/modules/4.16.0-rc5-00086-gdf09348f78dc/build/vmlinux │syscall_return_via_sysret: │ pop %r15 │ pop %r14 │ pop %r13 │ pop %r12 │ pop %rbp │ pop %rbx │ pop %rsi │ pop %r10 │ pop %r9 │ pop %r8 │ pop %rax │ pop %rsi │ pop %rdx │ pop %rsi │ mov %rsp,%rdi │ mov %gs:0x5004,%rsp │ pushq 0x28(%rdi) │ pushq (%rdi) │ push %rax │ ↓ jmp 132 │ mov %cr3,%rdi │ ┌──jmp 128 │ │ mov %rdi,%rax │ │ and $0x7ff,%rdi │ │ bt %rdi,%gs:0x2219a │ │↓ jae 119 │ │ btr %rdi,%gs:0x2219a │ │ mov %rax,%rdi │ │↓ jmp 121 │119:│ mov %rax,%rdi │ │ bts $0x3f,%rdi │121:│ or $0x800,%rdi │128:└─→or $0x1000,%rdi │ mov %rdi,%cr3 │132: pop %rax │ pop %rdi │ pop %rsp │ → jmpq *0x825d2d(%rip) # ffffffff82225e88 <pv_cpu_ops+0xe8> This all gets much better viewed if one uses 'perf report --ignore-vmlinux' forcing the usage of 
/proc/kcore + /proc/kallsyms, when the above actually gets down to: # perf report --ignore-vmlinux ## do '/64', will show the function names containing '64', ## navigate to /entry_SYSCALL_64_after_hwframe.annotation, ## press 'A' to annotate, then 'P' to print that annotation ## to a file ## From another xterm (or see on screen, this 'P' thing is for ## getting rid of those right side scroll bars/spaces): # cat /entry_SYSCALL_64_after_hwframe.annotation entry_SYSCALL_64_after_hwframe() /proc/kcore Event: cycles:ppp Percent Disassembly of section load0: ffffffff9aa00044 <load0>: 11.97 push %rax 4.85 push %rdi push %rsi 2.59 push %rdx 2.27 push %rcx 0.32 pushq $0xffffffffffffffda 1.29 push %r8 xor %r8d,%r8d 1.62 push %r9 0.65 xor %r9d,%r9d 1.62 push %r10 xor %r10d,%r10d 5.50 push %r11 xor %r11d,%r11d 3.56 push %rbx xor %ebx,%ebx 4.21 push %rbp xor %ebp,%ebp 2.59 push %r12 0.97 xor %r12d,%r12d 3.24 push %r13 xor %r13d,%r13d 2.27 push %r14 xor %r14d,%r14d 4.21 push %r15 xor %r15d,%r15d 0.97 mov %rsp,%rdi 5.50 → callq do_syscall_64 14.56 mov 0x58(%rsp),%rcx 7.44 mov 0x80(%rsp),%r11 0.32 cmp %rcx,%r11 → jne swapgs_restore_regs_and_return_to_usermode 0.32 shl $0x10,%rcx 0.32 sar $0x10,%rcx 3.24 cmp %rcx,%r11 → jne swapgs_restore_regs_and_return_to_usermode 2.27 cmpq $0x33,0x88(%rsp) 1.29 → jne swapgs_restore_regs_and_return_to_usermode mov 0x30(%rsp),%r11 8.74 cmp %r11,0x90(%rsp) → jne swapgs_restore_regs_and_return_to_usermode 0.32 test $0x10100,%r11 → jne swapgs_restore_regs_and_return_to_usermode 0.32 cmpq $0x2b,0xa0(%rsp) 0.65 → jne swapgs_restore_regs_and_return_to_usermode I.e. using kallsyms makes the function start/end be done differently than using what is in the vmlinux ELF symtab and actually the hits goes to entry_SYSCALL_64_after_hwframe, which is a GLOBAL() after the start of entry_SYSCALL_64: ENTRY(entry_SYSCALL_64) UNWIND_HINT_EMPTY <SNIP> pushq $__USER_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %rax /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rax=$-ENOSYS And it goes and ends at: cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ jne swapgs_restore_regs_and_return_to_usermode /* * We win! This label is here just for ease of understanding * perf profiles. Nothing jumps here. */ syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ UNWIND_HINT_EMPTY POP_REGS pop_rdi=0 skip_r11rcx=1 So perhaps some people should really just play with '--ignore-vmlinux' to force /proc/kcore + kallsyms. One idea is to do both, i.e. have a vmlinux annotation and a kcore+kallsyms one, when possible, and even show the patched location, etc. Reported-by: Linus Torvalds <torvalds@linux-foundation.org> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: David Ahern <dsahern@gmail.com> Cc: Jin Yao <yao.jin@linux.intel.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Wang Nan <wangnan0@huawei.com> Link: https://lkml.kernel.org/n/tip-r11knxv8voesav31xokjiuo6@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2018-03-23 22:26:39 +07:00
if (!ops->target.outside) {
ops->target.offset = target.addr - start;
perf annotate: Fix jump target outside of function address range If jump target is outside of function range, perf is not handling it correctly. Especially when target address is lesser than function start address, target offset will be negative. But, target address declared to be unsigned, converts negative number into 2's complement. See below example. Here target of 'jumpq' instruction at 34cf8 is 34ac0 which is lesser than function start address(34cf0). 34ac0 - 34cf0 = -0x230 = 0xfffffffffffffdd0 Objdump output: 0000000000034cf0 <__sigaction>: __GI___sigaction(): 34cf0: lea -0x20(%rdi),%eax 34cf3: cmp -bashx1,%eax 34cf6: jbe 34d00 <__sigaction+0x10> 34cf8: jmpq 34ac0 <__GI___libc_sigaction> 34cfd: nopl (%rax) 34d00: mov 0x386161(%rip),%rax # 3bae68 <_DYNAMIC+0x2e8> 34d07: movl -bashx16,%fs:(%rax) 34d0e: mov -bashxffffffff,%eax 34d13: retq perf annotate before applying patch: __GI___sigaction /usr/lib64/libc-2.22.so lea -0x20(%rdi),%eax cmp -bashx1,%eax v jbe 10 v jmpq fffffffffffffdd0 nop 10: mov _DYNAMIC+0x2e8,%rax movl -bashx16,%fs:(%rax) mov -bashxffffffff,%eax retq perf annotate after applying patch: __GI___sigaction /usr/lib64/libc-2.22.so lea -0x20(%rdi),%eax cmp -bashx1,%eax v jbe 10 ^ jmpq 34ac0 <__GI___libc_sigaction> nop 10: mov _DYNAMIC+0x2e8,%rax movl -bashx16,%fs:(%rax) mov -bashxffffffff,%eax retq Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Chris Riyder <chris.ryder@arm.com> Cc: Kim Phillips <kim.phillips@arm.com> Cc: Markus Trippelsdorf <markus@trippelsdorf.de> Cc: Masami Hiramatsu <mhiramat@kernel.org> Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Taeung Song <treeze.taeung@gmail.com> Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/r/1480953407-7605-3-git-send-email-ravi.bangoria@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-12-05 22:56:47 +07:00
ops->target.offset_avail = true;
} else {
ops->target.offset_avail = false;
}
return 0;
}
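/*
 * Print jumps in simplified form, with the target as an offset
 * relative to the function start; a jump that resolves to another
 * function prints that symbol's name instead, so the browser can
 * navigate it like a call.
 */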
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
const char *c;
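/*
 * Indirect jumps (e.g. powerpc's bctr) have no target address to
 * print, and a negative offset means the target lies below the
 * function start: in both cases fall back to objdump's raw line.
 */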
if (!ops->target.addr || ops->target.offset < 0)
perf annotate: Show raw form for jump instruction with indirect target For jump instructions that does not include target address as direct operand, show the original disassembled line for them. This is needed for certain powerpc jump instructions that use target address in a register (such as bctr, btar, ...). Before: ld r12,32088(r12) mtctr r12 v bctr ffffffffffffca2c std r2,24(r1) addis r12,r2,-1 After: ld r12,32088(r12) mtctr r12 v bctr std r2,24(r1) addis r12,r2,-1 Committer notes: Testing it using a perf.data file and vmlinux for powerpc64, cross-annotating it on a x86_64 workstation: Before: .__bpf_prog_run vmlinux.powerpc │ std r10,512(r9) ▒ │ lbz r9,0(r31) ▒ │ rldicr r9,r9,3,60 ▒ │ ldx r9,r30,r9 ▒ │ mtctr r9 ▒ 100.00 │ ↓ bctr 3fffffffffe01510 ▒ │ lwa r10,4(r31) ▒ │ lwz r9,0(r31) ▒ <SNIP> Invalid jump offset: 3fffffffffe01510 After: .__bpf_prog_run vmlinux.powerpc │ std r10,512(r9) ▒ │ lbz r9,0(r31) ▒ │ rldicr r9,r9,3,60 ▒ │ ldx r9,r30,r9 ▒ │ mtctr r9 ▒ 100.00 │ ↓ bctr ▒ │ lwa r10,4(r31) ▒ │ lwz r9,0(r31) ▒ <SNIP> Invalid jump offset: 3fffffffffe01510 This, in turn, uncovers another problem with jumps without operands, the ENTER/-> operation, to jump to the target, still continues using the bogus target :-) BTW, this was the file used for the above tests: [acme@jouet ravi_bangoria]$ perf report --header-only -i perf.data.f22vm.powerdev # ======== # captured on: Thu Nov 24 12:40:38 2016 # hostname : pdev-f22-qemu # os release : 4.4.10-200.fc22.ppc64 # perf version : 4.9.rc1.g6298ce # arch : ppc64 # nrcpus online : 48 # nrcpus avail : 48 # cpudesc : POWER7 (architected), altivec supported # cpuid : 74,513 # total memory : 4158976 kB # cmdline : /home/ravi/Workspace/linux/tools/perf/perf record -a # event : name = cycles:ppp, , size = 112, { sample_period, sample_freq } = 4000, sample_type = IP|TID|TIME|CPU|PERIOD, disabled = 1, inherit = 1, mmap = 1, c # HEADER_CPU_TOPOLOGY info available, use -I to display # HEADER_NUMA_TOPOLOGY info available, use -I to display # pmu mappings: cpu = 4, software = 1, tracepoint = 2, breakpoint = 5 # missing features: HEADER_TRACING_DATA HEADER_BRANCH_STACK HEADER_GROUP_DESC HEADER_AUXTRACE HEADER_STAT HEADER_CACHE # ======== # [acme@jouet ravi_bangoria]$ Suggested-by: Michael Ellerman <mpe@ellerman.id.au> Signed-off-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Chris Riyder <chris.ryder@arm.com> Cc: Kim Phillips <kim.phillips@arm.com> Cc: Markus Trippelsdorf <markus@trippelsdorf.de> Cc: Masami Hiramatsu <mhiramat@kernel.org> Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Taeung Song <treeze.taeung@gmail.com> Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/r/1480953407-7605-1-git-send-email-ravi.bangoria@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-12-05 22:56:45 +07:00
return ins__raw_scnprintf(ins, bf, size, ops);
if (ops->target.outside && ops->target.sym != NULL)
return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
c = strchr(ops->raw, ',');
c = validate_comma(c, ops);
if (c != NULL) {
const char *c2 = strchr(c + 1, ',');
c2 = validate_comma(c2, ops);
/* check for 3-op insn */
if (c2 != NULL)
c = c2;
c++;
/* mirror arch objdump's space-after-comma style */
if (*c == ' ')
c++;
}
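/*
 * Print the instruction name, any operands preceding the address
 * verbatim, then the target offset in hex, e.g. "bgt    cr6,40" or
 * "tbnz   w0, #26, d0".
 */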
return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}
static struct ins_ops jump_ops = {
.parse = jump__parse,
.scnprintf = jump__scnprintf,
};
bool ins__is_jump(const struct ins *ins)
{
return ins->ops == &jump_ops;
}
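/*
 * Parse an objdump comment such as:
 *   # 234008 <stderr@@GLIBC_2.2.5+0x9a8>
 * into an address and a strdup()ed symbol name. Only RIP-relative
 * operands carry such comments.
 */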
static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
char *endptr, *name, *t;
if (strstr(raw, "(%rip)") == NULL)
return 0;
*addrp = strtoull(comment, &endptr, 16);
perf annotate: Fix objdump comment parsing for Intel mov disassembly The command 'perf annotate' parses the output of objdump and also investigates the comments produced by objdump. For example the output of objdump produces (on x86): 23eee: 4c 8b 3d 13 01 21 00 mov 0x210113(%rip),%r15 # 234008 <stderr@@GLIBC_2.2.5+0x9a8> and the function mov__parse() is called to investigate the complete line. Mov__parse() breaks this line into several parts and finally calls function comment__symbol() to parse the data after the comment character '#'. Comment__symbol() expects a hexadecimal address followed by a symbol in '<' and '>' brackets. However the 2nd parameter given to function comment__symbol() always points to the comment character '#'. The address parsing always returns 0 because the character '#' is not a digit and strtoull() fails without being noticed. Fix this by advancing the second parameter to function comment__symbol() by one byte before invocation and adding an error check after strtoull() has been called. Signed-off-by: Thomas Richter <tmricht@linux.vnet.ibm.com> Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com> Acked-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Fixes: 6de783b6f50f ("perf annotate: Resolve symbols using objdump comment") Link: http://lkml.kernel.org/r/20171128075632.72182-1-tmricht@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-11-28 14:56:32 +07:00
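/* strtoull() consumed no hex digits: the comment carries no address, bail out. */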
if (endptr == comment)
return 0;
name = strchr(endptr, '<');
if (name == NULL)
return -1;
name++;
t = strchr(name, '>');
if (t == NULL)
return 0;
*t = '\0';
*namep = strdup(name);
*t = '>';
return 0;
}
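/*
 * "lock" is an instruction prefix: parse what follows it as a
 * standalone instruction so it gets its own ins_ops and can be
 * pretty-printed recursively by lock__scnprintf().
 */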
static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
if (ops->locked.ops == NULL)
return 0;
perf annotate: Remove duplicate 'name' field from disasm_line The disasm_line::name field is always equal to ins::name, being used just to locate the instruction's ins_ops from the per-arch instructions table. Eliminate this duplication, nuking that field and instead make ins__find() return an ins_ops, store it in disasm_line::ins.ops, and keep just in disasm_line::ins.name what was in disasm_line::name, this way we end up not keeping a reference to entries in the per-arch instructions table. This in turn will help supporting multiple ways to manage the per-arch instructions table, allowing resorting that array, for instance, when the entries will move after references to its addresses were made. The same problem is avoided when one grows the array with realloc. So architectures simply keeping a constant array will work as well as architectures building the table using regular expressions or other logic that involves resorting the table. Reviewed-by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Chris Riyder <chris.ryder@arm.com> Cc: David Ahern <dsahern@gmail.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Kim Phillips <kim.phillips@arm.com> Cc: Markus Trippelsdorf <markus@trippelsdorf.de> Cc: Masami Hiramatsu <mhiramat@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> Cc: Pawel Moll <pawel.moll@arm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Russell King <rmk+kernel@arm.linux.org.uk> Cc: Taeung Song <treeze.taeung@gmail.com> Cc: Wang Nan <wangnan0@huawei.com> Link: http://lkml.kernel.org/n/tip-vr899azvabnw9gtuepuqfd9t@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-11-24 21:16:06 +07:00
if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
goto out_free_ops;
ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
if (ops->locked.ins.ops == NULL)
goto out_free_ops;
if (ops->locked.ins.ops->parse &&
ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
goto out_free_ops;
return 0;
out_free_ops:
zfree(&ops->locked.ops);
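/* Not fatal: lock__scnprintf() falls back to the raw objdump line. */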
return 0;
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
int printed;
if (ops->locked.ins.ops == NULL)
return ins__raw_scnprintf(ins, bf, size, ops);
printed = scnprintf(bf, size, "%-6s ", ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
size - printed, ops->locked.ops);
}
static void lock__delete(struct ins_operands *ops)
{
struct ins *ins = &ops->locked.ins;
if (ins->ops && ins->ops->free)
ins->ops->free(ops->locked.ops);
else
ins__delete(ops->locked.ops);
zfree(&ops->locked.ops);
zfree(&ops->target.raw);
zfree(&ops->target.name);
}
static struct ins_ops lock_ops = {
.free = lock__delete,
.parse = lock__parse,
.scnprintf = lock__scnprintf,
};
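/*
 * Split "mov    source,target" at the comma, then try to resolve
 * symbol names from a trailing objdump comment, e.g.:
 *   mov    0x210113(%rip),%r15        # 234008 <stderr@@GLIBC_2.2.5+0x9a8>
 */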
static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *comment, prev;
if (s == NULL)
return -1;
*s = '\0';
ops->source.raw = strdup(ops->raw);
*s = ',';
if (ops->source.raw == NULL)
return -1;
target = ++s;
comment = strchr(s, arch->objdump.comment_char);
if (comment != NULL)
s = comment - 1;
else
s = strchr(s, '\0') - 1;
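/* Trim trailing whitespace between the target operand and the comment. */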
while (s > target && isspace(s[0]))
--s;
s++;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
goto out_free_source;
if (comment == NULL)
return 0;
comment = ltrim(comment);
comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
out_free_source:
zfree(&ops->source.raw);
return -1;
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6s %s,%s", ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops mov_ops = {
.parse = mov__parse,
.scnprintf = mov__scnprintf,
};
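/*
 * dec__parse() handles single-operand instructions: the operand runs up
 * to the first whitespace and may be followed by an objdump comment
 * resolving the target to an address and symbol, parsed the same way as
 * in mov__parse() above.
 */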
static int dec__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
{
char *target, *comment, *s, prev;
target = s = ops->raw;
while (s[0] != '\0' && !isspace(s[0]))
++s;
prev = *s;
*s = '\0';
ops->target.raw = strdup(target);
*s = prev;
if (ops->target.raw == NULL)
return -1;
comment = strchr(s, arch->objdump.comment_char);
if (comment == NULL)
return 0;
comment = ltrim(comment);
comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
return scnprintf(bf, size, "%-6s %s", ins->name,
ops->target.name ?: ops->target.raw);
}
static struct ins_ops dec_ops = {
.parse = dec__parse,
.scnprintf = dec__scnprintf,
};
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
struct ins_operands *ops __maybe_unused)
{
return scnprintf(bf, size, "%-6s", "nop");
}
static struct ins_ops nop_ops = {
.scnprintf = nop__scnprintf,
};
static struct ins_ops ret_ops = {
.scnprintf = ins__raw_scnprintf,
};
bool ins__is_ret(const struct ins *ins)
{
return ins->ops == &ret_ops;
}
bool ins__is_lock(const struct ins *ins)
{
return ins->ops == &lock_ops;
}
static int ins__key_cmp(const void *name, const void *insp)
{
const struct ins *ins = insp;
return strcmp(name, ins->name);
}
static int ins__cmp(const void *a, const void *b)
{
const struct ins *ia = a;
const struct ins *ib = b;
return strcmp(ia->name, ib->name);
}
static void ins__sort(struct arch *arch)
{
const int nmemb = arch->nr_instructions;
qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
}
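/*
 * The per-arch instructions table is sorted lazily on first lookup, so
 * arches that populate or grow the table dynamically (e.g. with
 * realloc) need not keep it sorted up front; lookups then use bsearch()
 * on the instruction name.
 */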
static struct ins_ops *__ins__find(struct arch *arch, const char *name)
{
struct ins *ins;
const int nmemb = arch->nr_instructions;
if (!arch->sorted_instructions) {
ins__sort(arch);
arch->sorted_instructions = true;
}
ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
return ins ? ins->ops : NULL;
}
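/*
 * If the static table lookup fails, arch->associate_instruction_ops
 * gives the arch a chance to match the name (for instance with regular
 * expressions), add it to arch->instructions and return its ins_ops.
 */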
static struct ins_ops *ins__find(struct arch *arch, const char *name)
{
struct ins_ops *ops = __ins__find(arch, name);
if (!ops && arch->associate_instruction_ops)
ops = arch->associate_instruction_ops(arch, name);
return ops;
}
static int arch__key_cmp(const void *name, const void *archp)
{
const struct arch *arch = archp;
return strcmp(name, arch->name);
}
static int arch__cmp(const void *a, const void *b)
{
const struct arch *aa = a;
const struct arch *ab = b;
return strcmp(aa->name, ab->name);
}
static void arch__sort(void)
{
const int nmemb = ARRAY_SIZE(architectures);
qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
}
static struct arch *arch__find(const char *name)
{
const int nmemb = ARRAY_SIZE(architectures);
static bool sorted;
if (!sorted) {
arch__sort();
sorted = true;
}
return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
static struct annotated_source *annotated_source__new(void)
{
struct annotated_source *src = zalloc(sizeof(*src));
if (src != NULL)
INIT_LIST_HEAD(&src->source);
return src;
}
static __maybe_unused void annotated_source__delete(struct annotated_source *src)
{
if (src == NULL)
return;
zfree(&src->histograms);
zfree(&src->cycles_hist);
free(src);
}
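/*
 * The histograms are nr_hists consecutive blobs of sizeof_sym_hist
 * bytes, one per event: a struct sym_hist header followed by one
 * sym_hist_entry per byte of the symbol, indexed by the offset from
 * sym->start.
 */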
static int annotated_source__alloc_histograms(struct annotated_source *src,
size_t size, int nr_hists)
{
size_t sizeof_sym_hist;
/*
 * Allocate a buffer of one element for zero-length symbols.
 * When a sample is taken from the first instruction of a
 * zero-length symbol, perf still resolves it, shows the symbol
 * name in perf report and allows it to be annotated.
 */
if (size == 0)
size = 1;
/* Check for overflow when calculating sizeof_sym_hist */
if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
return -1;
sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
/* Check for overflow in zalloc argument */
if (sizeof_sym_hist > SIZE_MAX / nr_hists)
return -1;
src->sizeof_sym_hist = sizeof_sym_hist;
src->nr_histograms = nr_hists;
src->histograms = calloc(nr_hists, sizeof_sym_hist);
return src->histograms ? 0 : -1;
}
/* The cycles histogram is lazily allocated. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
const size_t size = symbol__size(sym);
notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
if (notes->src->cycles_hist == NULL)
return -1;
return 0;
}
void symbol__annotate_zero_histograms(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
pthread_mutex_lock(&notes->lock);
if (notes->src != NULL) {
memset(notes->src->histograms, 0,
notes->src->nr_histograms * notes->src->sizeof_sym_hist);
if (notes->src->cycles_hist)
memset(notes->src->cycles_hist, 0,
symbol__size(sym) * sizeof(struct cyc_hist));
}
pthread_mutex_unlock(&notes->lock);
}
static int __symbol__account_cycles(struct cyc_hist *ch,
u64 start,
unsigned offset, unsigned cycles,
unsigned have_start)
{
/*
 * For now we can only account one basic block per final jump,
 * but multiple blocks could overlap. Always account the longest
 * one, so when a shorter one has already been seen, throw it
 * away.
 *
 * The full cycles are always accounted separately.
 */
ch[offset].num_aggr++;
ch[offset].cycles_aggr += cycles;
if (cycles > ch[offset].cycles_max)
ch[offset].cycles_max = cycles;
if (ch[offset].cycles_min) {
if (cycles && cycles < ch[offset].cycles_min)
ch[offset].cycles_min = cycles;
} else
ch[offset].cycles_min = cycles;
if (!have_start && ch[offset].have_start)
return 0;
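/*
 * A smaller 'start' means a longer basic block ending at this
 * offset: restart the stats for the longer block and count the
 * reset so annotation__count_and_fill() can hide the data when
 * there were too many overlaps; a shorter block is ignored.
 */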
if (ch[offset].num) {
if (have_start && (!ch[offset].have_start ||
ch[offset].start > start)) {
ch[offset].have_start = 0;
ch[offset].cycles = 0;
ch[offset].num = 0;
if (ch[offset].reset < 0xffff)
ch[offset].reset++;
} else if (have_start &&
ch[offset].start < start)
return 0;
}
ch[offset].have_start = have_start;
ch[offset].start = start;
ch[offset].cycles += cycles;
ch[offset].num++;
return 0;
}
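/*
 * Account one sample at 'addr' to the per-event histogram: both the
 * symbol-wide totals and the per-offset entry (offset relative to
 * sym->start) are bumped by one sample and by sample->period.
 */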
static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
struct annotated_source *src, int evidx, u64 addr,
struct perf_sample *sample)
{
unsigned offset;
struct sym_hist *h;
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
if ((addr < sym->start || addr >= sym->end) &&
(addr != sym->end || sym->start != sym->end)) {
pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
__func__, __LINE__, sym->name, sym->start, addr, sym->end);
return -ERANGE;
}
offset = addr - sym->start;
h = annotated_source__histogram(src, evidx);
if (h == NULL) {
pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n",
__func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
return -ENOMEM;
}
h->nr_samples++;
h->addr[offset].nr_samples++;
h->period += sample->period;
h->addr[offset].period += sample->period;
pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
sym->start, sym->name, addr, addr - sym->start, evidx,
h->addr[offset].nr_samples, h->addr[offset].period);
return 0;
}
static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
if (notes->src == NULL) {
notes->src = annotated_source__new();
if (notes->src == NULL)
return NULL;
goto alloc_cycles_hist;
}
if (!notes->src->cycles_hist) {
alloc_cycles_hist:
symbol__alloc_hist_cycles(sym);
}
return notes->src->cycles_hist;
}
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
{
struct annotation *notes = symbol__annotation(sym);
if (notes->src == NULL) {
notes->src = annotated_source__new();
if (notes->src == NULL)
return NULL;
goto alloc_histograms;
}
if (notes->src->histograms == NULL) {
alloc_histograms:
annotated_source__alloc_histograms(notes->src, symbol__size(sym),
nr_hists);
}
return notes->src;
}
static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, u64 addr,
struct perf_sample *sample)
{
struct annotated_source *src;
if (sym == NULL)
return 0;
src = symbol__hists(sym, evsel->evlist->nr_entries);
if (src == NULL)
return -ENOMEM;
return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
}
static int symbol__account_cycles(u64 addr, u64 start,
struct symbol *sym, unsigned cycles)
{
struct cyc_hist *cycles_hist;
unsigned offset;
if (sym == NULL)
return 0;
cycles_hist = symbol__cycles_hist(sym);
if (cycles_hist == NULL)
return -ENOMEM;
if (addr < sym->start || addr >= sym->end)
return -ERANGE;
if (start) {
if (start < sym->start || start >= sym->end)
return -ERANGE;
if (start >= addr)
start = 0;
}
offset = addr - sym->start;
return __symbol__account_cycles(cycles_hist,
start ? start - sym->start : 0,
offset, cycles,
!!start);
}
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
struct addr_map_symbol *start,
unsigned cycles)
{
u64 saddr = 0;
int err;
if (!cycles)
return 0;
/*
 * Only set start when IPC can be computed. We can only
 * compute it when the basic block is completely in a single
 * function.
 * Special-case jumps from elsewhere that land exactly on the
 * function start.
 */
if (start &&
(start->sym == ams->sym ||
(ams->sym &&
start->addr == ams->sym->start + ams->map->start)))
saddr = start->al_addr;
if (saddr == 0)
pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
ams->addr,
start ? start->addr : 0,
ams->sym ? ams->sym->start + ams->map->start : 0,
saddr);
err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
if (err)
pr_debug2("account_cycles failed %d\n", err);
return err;
}
static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
{
unsigned n_insn = 0;
u64 offset;
for (offset = start; offset <= end; offset++) {
if (notes->offsets[offset])
n_insn++;
}
return n_insn;
}
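/*
 * The IPC for a basic block is the number of instructions in
 * [start, end] divided by the average cycles per traversal
 * (ch->cycles / ch->num); every line in the block is given the same
 * value.
 */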
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
unsigned n_insn;
u64 offset;
n_insn = annotation__count_insn(notes, start, end);
if (n_insn && ch->num && ch->cycles) {
float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
/* Hide data when there are too many overlaps. */
if (ch->reset >= 0x7fff || ch->reset >= ch->num / 2)
return;
for (offset = start; offset <= end; offset++) {
struct annotation_line *al = notes->offsets[offset];
if (al)
al->ipc = ipc;
}
}
}
void annotation__compute_ipc(struct annotation *notes, size_t size)
{
u64 offset;
if (!notes->src || !notes->src->cycles_hist)
return;
pthread_mutex_lock(&notes->lock);
for (offset = 0; offset < size; ++offset) {
struct cyc_hist *ch;
ch = &notes->src->cycles_hist[offset];
if (ch && ch->cycles) {
struct annotation_line *al;
if (ch->have_start)
annotation__count_and_fill(notes, ch->start, offset, ch);
al = notes->offsets[offset];
if (al && ch->num_aggr) {
al->cycles = ch->cycles_aggr / ch->num_aggr;
al->cycles_max = ch->cycles_max;
al->cycles_min = ch->cycles_min;
}
notes->have_cycles = true;
}
}
pthread_mutex_unlock(&notes->lock);
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
struct perf_evsel *evsel)
{
return symbol__inc_addr_samples(ams->sym, ams->map, evsel, ams->al_addr, sample);
}
int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
struct perf_evsel *evsel, u64 ip)
{
return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evsel, ip, sample);
}
static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
{
dl->ins.ops = ins__find(arch, dl->ins.name);
if (!dl->ins.ops)
return;
if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
dl->ins.ops = NULL;
}
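/*
 * Split a raw disassembly line into the instruction name (the first
 * token) and its raw operands: the name is strdup()ed into *namep,
 * while *rawp is left pointing into 'line', past the name, with leading
 * blanks trimmed.
 */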
static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
char tmp, *name = ltrim(line);
if (name[0] == '\0')
return -1;
*rawp = name + 1;
while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
++*rawp;
tmp = (*rawp)[0];
(*rawp)[0] = '\0';
*namep = strdup(name);
if (*namep == NULL)
goto out_free_name;
(*rawp)[0] = tmp;
*rawp = ltrim(*rawp);
return 0;
out_free_name:
	/* strdup() failed, so *namep is already NULL and nothing was allocated */
return -1;
}
struct annotate_args {
size_t privsize;
struct arch *arch;
struct map_symbol ms;
struct perf_evsel *evsel;
struct annotation_options *options;
s64 offset;
char *line;
int line_nr;
};
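/*
 * The annotation_line sits 'privsize' bytes into the zalloc()ed buffer
 * (see annotation_line__new() below), so step back to the start of the
 * real allocation before freeing.
 */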
static void annotation_line__delete(struct annotation_line *al)
{
void *ptr = (void *) al - al->privsize;
free_srcline(al->path);
zfree(&al->line);
free(ptr);
}
/*
 * The annotation line data is allocated with the following layout:
 *
 * --------------------------------------
 * private space | struct annotation_line
 * --------------------------------------
 *
 * The size of the private space is stored in 'struct annotation_line'.
 */
static struct annotation_line *
annotation_line__new(struct annotate_args *args, size_t privsize)
{
struct annotation_line *al;
struct perf_evsel *evsel = args->evsel;
size_t size = privsize + sizeof(*al);
int nr = 1;
if (perf_evsel__is_group_event(evsel))
nr = evsel->nr_members;
size += sizeof(al->data[0]) * nr;
al = zalloc(size);
if (al) {
al = (void *) al + privsize;
al->privsize = privsize;
al->offset = args->offset;
al->line = strdup(args->line);
al->line_nr = args->line_nr;
al->data_nr = nr;
}
return al;
}
/*
 * The disasm annotation line data is allocated with the
 * following layout:
 *
 * ------------------------------------------------------------
 * privsize space | struct disasm_line | struct annotation_line
 * ------------------------------------------------------------
 *
 * 'struct annotation_line' is the last member of
 * 'struct disasm_line' so that it can be accessed easily.
 */
static struct disasm_line *disasm_line__new(struct annotate_args *args)
{
struct disasm_line *dl = NULL;
struct annotation_line *al;
size_t privsize = args->privsize + offsetof(struct disasm_line, al);
al = annotation_line__new(args, privsize);
if (al != NULL) {
dl = disasm_line(al);
if (dl->al.line == NULL)
goto out_delete;
if (args->offset != -1) {
if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
goto out_free_line;
disasm_line__init_ins(dl, args->arch, &args->ms);
}
}
return dl;
out_free_line:
zfree(&dl->al.line);
out_delete:
free(dl);
return NULL;
}
void disasm_line__free(struct disasm_line *dl)
{
if (dl->ins.ops && dl->ins.ops->free)
dl->ins.ops->free(&dl->ops);
else
ins__delete(&dl->ops);
free((void *)dl->ins.name);
dl->ins.name = NULL;
annotation_line__delete(&dl->al);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
if (raw || !dl->ins.ops)
return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
list_add_tail(&al->node, head);
}
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
list_for_each_entry_continue(pos, head, node)
if (pos->offset >= 0)
return pos;
return NULL;
}
static const char *annotate__address_color(struct block_range *br)
{
double cov = block_range__coverage(br);
if (cov >= 0) {
/* mark red for >75% coverage */
if (cov > 0.75)
return PERF_COLOR_RED;
/* mark dull for <1% coverage */
if (cov < 0.01)
return PERF_COLOR_NORMAL;
}
return PERF_COLOR_MAGENTA;
}
static const char *annotate__asm_color(struct block_range *br)
{
double cov = block_range__coverage(br);
if (cov >= 0) {
/* mark dull for <1% coverage */
if (cov < 0.01)
return PERF_COLOR_NORMAL;
}
return PERF_COLOR_BLUE;
}
static void annotate__branch_printf(struct block_range *br, u64 addr)
{
bool emit_comment = true;
if (!br)
return;
#if 1
if (br->is_target && br->start == addr) {
struct block_range *branch = br;
double p;
/*
* Find matching branch to our target.
*/
while (!branch->is_branch)
branch = block_range__next(branch);
p = 100 * (double)br->entry / branch->coverage;
if (p > 0.1) {
if (emit_comment) {
emit_comment = false;
printf("\t#");
}
/*
* The percentage of coverage joined at this target in relation
* to the next branch.
*/
printf(" +%.2f%%", p);
}
}
#endif
if (br->is_branch && br->end == addr) {
double p = 100 * (double)br->taken / br->coverage;
if (p > 0.1) {
if (emit_comment) {
emit_comment = false;
printf("\t#");
}
/*
* The percentage of coverage leaving at this branch, and
* its prediction ratio.
*/
printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
}
}
}
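/*
 * Print one disassembly line: the address colored by basic block
 * coverage, the asm text, then the branch taken/predicted comment
 * produced by annotate__branch_printf() above.
 */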
static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
{
s64 offset = dl->al.offset;
const u64 addr = start + offset;
struct block_range *br;
br = block_range__find(addr);
color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
annotate__branch_printf(br, addr);
return 0;
}
static int
annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
int max_lines, struct annotation_line *queue, int addr_fmt_width,
int percent_type)
{
struct disasm_line *dl = container_of(al, struct disasm_line, al);
static const char *prev_line;
static const char *prev_color;
if (al->offset != -1) {
double max_percent = 0.0;
int i, nr_percent = 1;
const char *color;
struct annotation *notes = symbol__annotation(sym);
for (i = 0; i < al->data_nr; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
percent_type);
if (percent > max_percent)
max_percent = percent;
}
if (al->data_nr > nr_percent)
nr_percent = al->data_nr;
if (max_percent < min_pcnt)
return -1;
if (max_lines && printed >= max_lines)
return 1;
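/* First print any lines queued up before this one. */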
if (queue != NULL) {
list_for_each_entry_from(queue, &notes->src->source, node) {
if (queue == al)
break;
annotation_line__print(queue, sym, start, evsel, len,
0, 0, 1, NULL, addr_fmt_width,
percent_type);
}
}
color = get_percent_color(max_percent);
/*
* Also color the filename and line if needed, with
* the same color as the percentage. Don't print it
* twice for nearby colored addresses with the same
* filename:line pair.
*/
if (al->path) {
if (!prev_line || strcmp(prev_line, al->path)
|| color != prev_color) {
color_fprintf(stdout, color, " %s", al->path);
prev_line = al->path;
prev_color = color;
}
}
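/*
 * One column per group member: the period, the number of samples or
 * the percentage, depending on what was requested.
 */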
for (i = 0; i < nr_percent; i++) {
struct annotation_data *data = &al->data[i];
double percent;
percent = annotation_data__percent(data, percent_type);
color = get_percent_color(percent);
if (symbol_conf.show_total_period)
color_fprintf(stdout, color, " %11" PRIu64,
data->he.period);
else if (symbol_conf.show_nr_samples)
color_fprintf(stdout, color, " %7" PRIu64,
data->he.nr_samples);
else
color_fprintf(stdout, color, " %7.2f", percent);
}
printf(" : ");
disasm_line__print(dl, start, addr_fmt_width);
printf("\n");
} else if (max_lines && printed >= max_lines)
return 1;
else {
int width = symbol_conf.show_total_period ? 12 : 8;
if (queue)
return -1;
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
if (!*al->line)
printf(" %*s:\n", width, " ");
else
printf(" %*s: %*s %s\n", width, " ", addr_fmt_width, " ", al->line);
}
return 0;
}
/*
* symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
* which looks like the following:
*
* 0000000000415500 <_init>:
* 415500: sub $0x8,%rsp
* 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
* 41550b: test %rax,%rax
* 41550e: je 415515 <_init+0x15>
* 415510: callq 416e70 <__gmon_start__@plt>
* 415515: add $0x8,%rsp
* 415519: retq
*
* It will be parsed and saved into struct disasm_line as
* <offset> <name> <ops.raw>
*
* The offset will be relative to the start of the symbol and -1 means
* that it's not a disassembly line, so it should be treated differently.
* The ops.raw part will be parsed further according to the type of the
* instruction.
*/
static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
struct annotate_args *args,
int *line_nr)
{
struct map *map = args->ms.map;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
char *line = NULL, *parsed_line, *tmp, *tmp2;
size_t line_len;
s64 line_ip, offset = -1;
regmatch_t match[2];
if (getline(&line, &line_len, file) < 0)
return -1;
if (!line)
return -1;
line_ip = -1;
parsed_line = rtrim(line);
/* /filename:linenr ? Save line number and ignore. */
if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
*line_nr = atoi(parsed_line + match[1].rm_so);
return 0;
}
tmp = ltrim(parsed_line);
if (*tmp) {
/*
* Parse hex addresses followed by ':'
*/
line_ip = strtoull(tmp, &tmp2, 16);
if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
line_ip = -1;
}
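/*
 * Convert the absolute objdump address into an offset relative to the
 * start of the symbol; addresses outside [start, end) keep offset == -1
 * and the line is treated as a non-disassembly line.
 */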
if (line_ip != -1) {
u64 start = map__rip_2objdump(map, sym->start),
end = map__rip_2objdump(map, sym->end);
offset = line_ip - start;
if ((u64)line_ip < start || (u64)line_ip >= end)
offset = -1;
else
parsed_line = tmp2 + 1;
}
args->offset = offset;
args->line = parsed_line;
args->line_nr = *line_nr;
args->ms.sym = sym;
dl = disasm_line__new(args);
free(line);
(*line_nr)++;
if (dl == NULL)
return -1;
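/*
 * The branch target offset could not be taken from the parsed
 * disassembly (e.g. the target lies outside this function's address
 * range), so derive it from the absolute target address instead.
 */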
if (!disasm_line__has_local_offset(dl)) {
dl->ops.target.offset = dl->ops.target.addr -
map__rip_2objdump(map, sym->start);
dl->ops.target.offset_avail = true;
}
/* kcore has no symbols, so add the call target symbol */
if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
struct addr_map_symbol target = {
.map = map,
.addr = dl->ops.target.addr,
};
if (!map_groups__find_ams(&target) &&
target.sym->start == target.al_addr)
dl->ops.target.sym = target.sym;
}
annotation_line__add(&dl->al, &notes->src->source);
return 0;
}
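/*
 * Compile the regex used by symbol__parse_objdump_line() to match
 * "/path/to/file.c:123" lines emitted by objdump -l; the first capture
 * group holds the line number.
 */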
static __attribute__((constructor)) void symbol__init_regexpr(void)
{
regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}
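/*
 * Drop trailing nop instructions (e.g. alignment padding) from the
 * tail of the parsed source list so they don't clutter the annotation.
 */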
static void delete_last_nop(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
struct list_head *list = &notes->src->source;
struct disasm_line *dl;
while (!list_empty(list)) {
dl = list_entry(list->prev, struct disasm_line, al.node);
if (dl->ins.ops) {
if (dl->ins.ops != &nop_ops)
return;
} else {
if (!strstr(dl->al.line, " nop ") &&
!strstr(dl->al.line, " nopl ") &&
!strstr(dl->al.line, " nopw "))
return;
}
list_del(&dl->al.node);
disasm_line__free(dl);
}
}
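/*
 * Format a human readable message for an annotation error: positive
 * values are plain errno codes, negative values are the
 * SYMBOL_ANNOTATE_ERRNO__* codes handled in the switch below.
 */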
int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
int errnum, char *buf, size_t buflen)
{
struct dso *dso = map->dso;
BUG_ON(buflen == 0);
if (errnum >= 0) {
str_error_r(errnum, buf, buflen);
return 0;
}
switch (errnum) {
case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
char bf[SBUILD_ID_SIZE + 15] = " with build id ";
char *build_id_msg = NULL;
if (dso->has_build_id) {
build_id__sprintf(dso->build_id,
sizeof(dso->build_id), bf + 15);
build_id_msg = bf;
}
scnprintf(buf, buflen,
"No vmlinux file%s\nwas found in the path.\n\n"
"Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
"Please use:\n\n"
" perf buildid-cache -vu vmlinux\n\n"
"or:\n\n"
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
}
return 0;
}
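/*
 * Pick the file that objdump will disassemble for this dso: prefer the
 * build-id cache entry and fall back to the dso's long_name when no
 * usable build-id file exists (or when it is just a kallsyms copy).
 */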
static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
{
char linkname[PATH_MAX];
char *build_id_filename;
char *build_id_path = NULL;
char *pos;
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
!dso__is_kcore(dso))
return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
if (build_id_filename) {
__symbol__join_symfs(filename, filename_size, build_id_filename);
free(build_id_filename);
} else {
if (dso->has_build_id)
return ENOMEM;
goto fallback;
}
build_id_path = strdup(filename);
if (!build_id_path)
return -1;
/*
* The old style build-id cache has a name of the form XX/XXXXXXX..,
* while the new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
* Extract the build-id part of the dirname in the new style only.
*/
pos = strrchr(build_id_path, '/');
if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
dirname(build_id_path);
if (dso__is_kcore(dso) ||
readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
strstr(linkname, DSO__NAME_KALLSYMS) ||
access(filename, R_OK)) {
fallback:
/*
* If we don't have build-ids or the build-id file isn't in the
* cache, or is just a kallsyms file, well, let's hope that this
* DSO is the same as when 'perf record' ran.
*/
__symbol__join_symfs(filename, filename_size, dso->long_name);
}
free(build_id_path);
return 0;
}
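/*
 * Disassemble a single symbol: resolve the file to read (possibly a
 * kcore extract or a decompressed kernel module), run objdump over the
 * symbol's address range and parse its output into disasm_line entries.
 */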
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
struct map *map = args->ms.map;
struct dso *dso = map->dso;
char *command;
FILE *file;
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
bool delete_extract = false;
bool decomp = false;
int stdout_fd[2];
int lineno = 0;
int nline;
pid_t pid;
int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
if (err)
return err;
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;
kce.len = sym->end - sym->start;
if (!kcore_extract__create(&kce)) {
delete_extract = true;
strlcpy(symfs_filename, kce.extract_filename,
sizeof(symfs_filename));
}
} else if (dso__needs_decompress(dso)) {
char tmp[KMOD_DECOMP_LEN];
if (dso__decompress_kmodule_path(dso, symfs_filename,
tmp, sizeof(tmp)) < 0)
goto out;
decomp = true;
strcpy(symfs_filename, tmp);
}
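/*
 * Build the objdump command line; with default options the result looks
 * roughly like this (file name illustrative):
 *
 *   objdump --start-address=0x... --stop-address=0x... \
 *     -l -d --no-show-raw -S -C "vmlinux" 2>/dev/null | \
 *     grep -v "vmlinux:" | expand
 */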
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
" -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
opts->objdump_path ?: "objdump",
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
opts->show_asm_raw ? "" : "--no-show-raw",
opts->annotate_src ? "-S" : "",
symfs_filename, symfs_filename);
if (err < 0) {
pr_err("Failure allocating memory for the command to run\n");
goto out_remove_tmp;
}
pr_debug("Executing: %s\n", command);
err = -1;
if (pipe(stdout_fd) < 0) {
pr_err("Failure creating the pipe to run %s\n", command);
goto out_free_command;
}
pid = fork();
if (pid < 0) {
pr_err("Failure forking to run %s\n", command);
goto out_close_stdout;
}
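/* Child process: route stdout into the pipe and run the command. */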
if (pid == 0) {
close(stdout_fd[0]);
dup2(stdout_fd[1], 1);
close(stdout_fd[1]);
execl("/bin/sh", "sh", "-c", command, NULL);
perror(command);
exit(-1);
}
close(stdout_fd[1]);
file = fdopen(stdout_fd[0], "r");
if (!file) {
pr_err("Failure creating FILE stream for %s\n", command);
		/*
		 * If we were using debug info, we should retry with the
		 * original binary.
		 */
goto out_free_command;
}
nline = 0;
while (!feof(file)) {
		/*
		 * The source code line number (lineno) needs to be kept
		 * across calls to symbol__parse_objdump_line(), so that it
		 * can be associated with the instructions until the next
		 * one. See disasm_line__new() and struct disasm_line::line_nr.
		 */
if (symbol__parse_objdump_line(sym, file, args, &lineno) < 0)
break;
nline++;
}
if (nline == 0)
pr_err("No output from %s\n", command);
	/*
	 * kallsyms does not have symbol sizes, so there may be a nop at the
	 * end. Remove it.
	 */
if (dso__is_kcore(dso))
delete_last_nop(sym);
fclose(file);
err = 0;
out_free_command:
free(command);
out_remove_tmp:
close(stdout_fd[0]);
if (decomp)
unlink(symfs_filename);
if (delete_extract)
kcore_extract__delete(&kce);
out:
return err;
out_close_stdout:
close(stdout_fd[1]);
goto out_free_command;
}
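/*
 * Sum the samples over the byte range [offset, end) of the symbol and
 * turn them into the percent flavours stored in struct annotation_data:
 * hits and period, each relative to the symbol itself (local) or to all
 * non-filtered samples in the hists (global).
 *
 * E.g. 25 hits on this range out of 100 samples for the whole symbol
 * yields PERCENT_HITS_LOCAL == 25.0.
 */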
static void calc_percent(struct sym_hist *sym_hist,
struct hists *hists,
struct annotation_data *data,
s64 offset, s64 end)
{
unsigned int hits = 0;
u64 period = 0;
while (offset < end) {
hits += sym_hist->addr[offset].nr_samples;
period += sym_hist->addr[offset].period;
++offset;
}
if (sym_hist->nr_samples) {
data->he.period = period;
data->he.nr_samples = hits;
data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
}
if (hists->stats.nr_non_filtered_samples)
data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
if (sym_hist->period)
data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
if (hists->stats.total_period)
data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
}
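/*
 * An annotation_line covers the bytes from its own offset up to the next
 * line's offset (or the symbol size for the last line).  For group
 * events, each member evsel fills its own slot in al->data[] from its
 * own histogram.
 */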
static void annotation__calc_percent(struct annotation *notes,
struct perf_evsel *leader, s64 len)
{
struct annotation_line *al, *next;
struct perf_evsel *evsel;
list_for_each_entry(al, &notes->src->source, node) {
s64 end;
int i = 0;
if (al->offset == -1)
continue;
next = annotation_line__next(al, &notes->src->source);
end = next ? next->offset : len;
for_each_group_evsel(evsel, leader) {
struct hists *hists = evsel__hists(evsel);
struct annotation_data *data;
struct sym_hist *sym_hist;
BUG_ON(i >= al->data_nr);
sym_hist = annotation__histogram(notes, evsel->idx);
data = &al->data[i++];
calc_percent(sym_hist, hists, data, al->offset, end);
}
}
}
void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
annotation__calc_percent(notes, evsel, symbol__size(sym));
}
int symbol__annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, size_t privsize,
struct annotation_options *options,
struct arch **parch)
{
struct annotate_args args = {
.privsize = privsize,
.evsel = evsel,
.options = options,
};
struct perf_env *env = perf_evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
struct arch *arch;
int err;
if (!arch_name)
return -1;
args.arch = arch = arch__find(arch_name);
if (arch == NULL)
return -ENOTSUP;
if (parch)
*parch = arch;
if (arch->init) {
err = arch->init(arch, env ? env->cpuid : NULL);
if (err) {
pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
return err;
}
}
args.ms.map = map;
args.ms.sym = sym;
return symbol__disassemble(sym, &args);
}
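/*
 * Accumulate per-source-line percentages into an rbtree keyed by source
 * file path: if the path is already there, just add this line's
 * percentages to its per-event percent_sum, otherwise link a new node.
 */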
static void insert_source_line(struct rb_root *root, struct annotation_line *al,
struct annotation_options *opts)
{
struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
int i, ret;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct annotation_line, rb_node);
ret = strcmp(iter->path, al->path);
if (ret == 0) {
for (i = 0; i < al->data_nr; i++) {
iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
opts->percent_type);
}
return;
}
if (ret < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
for (i = 0; i < al->data_nr; i++) {
al->data[i].percent_sum = annotation_data__percent(&al->data[i],
opts->percent_type);
}
rb_link_node(&al->rb_node, parent, p);
rb_insert_color(&al->rb_node, root);
}
static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
{
int i;
for (i = 0; i < a->data_nr; i++) {
if (a->data[i].percent_sum == b->data[i].percent_sum)
continue;
return a->data[i].percent_sum > b->data[i].percent_sum;
}
return 0;
}
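/*
 * Move every node from the path-ordered tree into a second tree ordered
 * by descending percent_sum, so print_summary() can walk it hottest
 * line first.
 */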
static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
{
struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct annotation_line, rb_node);
if (cmp_source_line(al, iter))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&al->rb_node, parent, p);
rb_insert_color(&al->rb_node, root);
}
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
struct annotation_line *al;
struct rb_node *node;
node = rb_first(src_root);
while (node) {
struct rb_node *next;
al = rb_entry(node, struct annotation_line, rb_node);
next = rb_next(node);
rb_erase(node, src_root);
__resort_source_line(dest_root, al);
node = next;
}
}
static void print_summary(struct rb_root *root, const char *filename)
{
struct annotation_line *al;
struct rb_node *node;
printf("\nSorted summary for file %s\n", filename);
printf("----------------------------------------------\n\n");
if (RB_EMPTY_ROOT(root)) {
printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
return;
}
node = rb_first(root);
while (node) {
double percent, percent_max = 0.0;
const char *color;
char *path;
int i;
al = rb_entry(node, struct annotation_line, rb_node);
for (i = 0; i < al->data_nr; i++) {
percent = al->data[i].percent_sum;
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
if (percent > percent_max)
percent_max = percent;
}
path = al->path;
color = get_percent_color(percent_max);
color_fprintf(stdout, color, " %s\n", path);
node = rb_next(node);
}
}
static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
u64 len = symbol__size(sym), offset;
for (offset = 0; offset < len; ++offset)
if (h->addr[offset].nr_samples != 0)
printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
sym->start + offset, h->addr[offset].nr_samples);
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}
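/*
 * The address column must fit the highest address in the listing, so
 * walk the lines backwards to the last one with an offset and measure
 * how many hex digits start + offset needs.
 */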
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
{
char bf[32];
struct annotation_line *line;
list_for_each_entry_reverse(line, lines, node) {
if (line->offset != -1)
return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
}
return 0;
}
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct annotation_options *opts)
{
struct dso *dso = map->dso;
char *filename;
const char *d_filename;
const char *evsel_name = perf_evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
struct annotation_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
int printed = 2, queue_len = 0, addr_fmt_width;
int more = 0;
bool context = opts->context;
u64 len;
int width = symbol_conf.show_total_period ? 12 : 8;
int graph_dotted_len;
char buf[512];
filename = strdup(dso->long_name);
if (!filename)
return -ENOMEM;
if (opts->full_path)
d_filename = filename;
else
d_filename = basename(filename);
len = symbol__size(sym);
if (perf_evsel__is_group_event(evsel)) {
width *= evsel->nr_members;
perf_evsel__group_desc(evsel, buf, sizeof(buf));
evsel_name = buf;
}
graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
"percent: %s)\n",
width, width, symbol_conf.show_total_period ? "Period" :
symbol_conf.show_nr_samples ? "Samples" : "Percent",
d_filename, evsel_name, h->nr_samples,
percent_type_str(opts->percent_type));
printf("%-*.*s----\n",
graph_dotted_len, graph_dotted_len, graph_dotted_line);
if (verbose > 0)
symbol__annotate_hits(sym, evsel);
addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
list_for_each_entry(pos, &notes->src->source, node) {
int err;
if (context && queue == NULL) {
queue = pos;
queue_len = 0;
}
err = annotation_line__print(pos, sym, start, evsel, len,
opts->min_pcnt, printed, opts->max_lines,
queue, addr_fmt_width, opts->percent_type);
switch (err) {
case 0:
++printed;
if (context) {
printed += queue_len;
queue = NULL;
queue_len = 0;
}
break;
case 1:
/* filtered by max_lines */
++more;
break;
case -1:
default:
			/*
			 * Filtered by min_pcnt or by non-IP lines when
			 * context != 0.
			 */
if (!context)
break;
if (queue_len == context)
queue = list_entry(queue->node.next, typeof(*queue), node);
else
++queue_len;
break;
}
}
free(filename);
return more;
}
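/*
 * Plain FILE * backend for annotation_line__write(): colors and
 * jump-percent highlighting are no-ops here, only printf and the arrow
 * glyphs do real work.
 */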
static void FILE__set_percent_color(void *fp __maybe_unused,
double percent __maybe_unused,
bool current __maybe_unused)
{
}
static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
int nr __maybe_unused, bool current __maybe_unused)
{
return 0;
}
static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
return 0;
}
static void FILE__printf(void *fp, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vfprintf(fp, fmt, args);
va_end(args);
}
static void FILE__write_graph(void *fp, int graph)
{
const char *s;
switch (graph) {
	case DARROW_CHAR: s = "↓"; break;
	case UARROW_CHAR: s = "↑"; break;
	case LARROW_CHAR: s = "←"; break;
	case RARROW_CHAR: s = "→"; break;
default: s = "?"; break;
}
fputs(s, fp);
}
static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
struct annotation_options *opts)
{
struct annotation *notes = symbol__annotation(sym);
struct annotation_write_ops wops = {
.first_line = true,
.obj = fp,
.set_color = FILE__set_color,
.set_percent_color = FILE__set_percent_color,
.set_jumps_percent_color = FILE__set_jumps_percent_color,
.printf = FILE__printf,
.write_graph = FILE__write_graph,
};
struct annotation_line *al;
list_for_each_entry(al, &notes->src->source, node) {
if (annotation_line__filter(al, notes))
continue;
annotation_line__write(al, notes, &wops, opts);
fputc('\n', fp);
wops.first_line = false;
}
return 0;
}
int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel,
struct annotation_options *opts)
{
const char *ev_name = perf_evsel__name(evsel);
char buf[1024];
char *filename;
int err = -1;
FILE *fp;
if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
return -1;
fp = fopen(filename, "w");
if (fp == NULL)
goto out_free_filename;
if (perf_evsel__is_group_event(evsel)) {
perf_evsel__group_desc(evsel, buf, sizeof(buf));
ev_name = buf;
}
fprintf(fp, "%s() %s\nEvent: %s\n\n",
ms->sym->name, ms->map->dso->long_name, ev_name);
symbol__annotate_fprintf2(ms->sym, fp, opts);
fclose(fp);
err = 0;
out_free_filename:
free(filename);
return err;
}
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
memset(h, 0, notes->src->sizeof_sym_hist);
}
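/*
 * Age the histogram by an eighth (new = old * 7/8) so that, over
 * successive decays, stale samples fade away while recent ones still
 * dominate, then recompute the symbol-wide total from the decayed
 * buckets.
 */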
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
int len = symbol__size(sym), offset;
h->nr_samples = 0;
for (offset = 0; offset < len; ++offset) {
h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
h->nr_samples += h->addr[offset].nr_samples;
}
}
void annotated_source__purge(struct annotated_source *as)
{
struct annotation_line *al, *n;
list_for_each_entry_safe(al, n, &as->source, node) {
list_del(&al->node);
disasm_line__free(disasm_line(al));
}
}
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
if (dl->al.offset == -1)
return fprintf(fp, "%s\n", dl->al.line);
printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
dl->ops.raw);
}
return printed + fprintf(fp, "\n");
}
size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
struct disasm_line *pos;
size_t printed = 0;
list_for_each_entry(pos, head, al.node)
printed += disasm_line__fprintf(pos, fp);
return printed;
}
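/*
 * A jump qualifies as a valid local jump only if it has a parsed target
 * offset inside [0, symbol size): anything unresolved or pointing
 * outside the symbol is ignored by the jump accounting below.
 */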
bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
{
if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
!disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
dl->ops.target.offset >= (s64)symbol__size(sym))
return false;
return true;
}
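/*
 * For every valid local jump, bump jump_sources on the *target* line
 * and track the maximum, which later sizes the jump-sources column via
 * width_jumps().
 */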
void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
u64 offset, size = symbol__size(sym);
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
return;
for (offset = 0; offset < size; ++offset) {
struct annotation_line *al = notes->offsets[offset];
struct disasm_line *dl;
dl = disasm_line(al);
if (!disasm_line__is_valid_local_jump(dl, sym))
continue;
al = notes->offsets[dl->ops.target.offset];
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
if (al == NULL)
continue;
if (++al->jump_sources > notes->max_jump_sources)
notes->max_jump_sources = al->jump_sources;
++notes->nr_jumps;
}
}
void annotation__set_offsets(struct annotation *notes, s64 size)
{
struct annotation_line *al;
notes->max_line_len = 0;
list_for_each_entry(al, &notes->src->source, node) {
size_t line_len = strlen(al->line);
if (notes->max_line_len < line_len)
notes->max_line_len = line_len;
al->idx = notes->nr_entries++;
if (al->offset != -1) {
al->idx_asm = notes->nr_asm_entries++;
			/*
			 * FIXME: short-term bandaid to cope with assembly
			 * routines that come with labels in the same column
			 * as the address in objdump, sigh.
			 *
			 * E.g. copy_user_generic_unrolled
			 */
if (al->offset < size)
notes->offsets[al->offset] = al;
} else
al->idx_asm = -1;
}
}
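/*
 * Column width for the jump-sources count: one digit up to 9, two up to
 * 99, then five to leave some padding for larger counts.
 */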
static inline int width_jumps(int n)
{
if (n >= 100)
return 5;
if (n / 10)
return 2;
return 1;
}
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
notes->widths.addr = notes->widths.target =
notes->widths.min_addr = hex_width(symbol__size(sym));
notes->widths.max_addr = hex_width(sym->end);
notes->widths.jumps = width_jumps(notes->max_jump_sources);
}
void annotation__update_column_widths(struct annotation *notes)
{
if (notes->options->use_offset)
notes->widths.target = notes->widths.min_addr;
else
notes->widths.target = notes->widths.max_addr;
notes->widths.addr = notes->widths.target;
if (notes->options->show_nr_jumps)
notes->widths.addr += notes->widths.jumps + 1;
}
static void annotation__calc_lines(struct annotation *notes, struct map *map,
struct rb_root *root,
struct annotation_options *opts)
{
struct annotation_line *al;
struct rb_root tmp_root = RB_ROOT;
list_for_each_entry(al, &notes->src->source, node) {
double percent_max = 0.0;
int i;
for (i = 0; i < al->data_nr; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
opts->percent_type);
if (percent > percent_max)
percent_max = percent;
}
if (percent_max <= 0.5)
continue;
al->path = get_srcline(map->dso, notes->start + al->offset, NULL,
false, true, notes->start + al->offset);
insert_source_line(&tmp_root, al, opts);
}
resort_source_line(root, &tmp_root);
}
static void symbol__calc_lines(struct symbol *sym, struct map *map,
struct rb_root *root,
struct annotation_options *opts)
{
struct annotation *notes = symbol__annotation(sym);
annotation__calc_lines(notes, map, root, opts);
}
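/*
 * The "2" variants below use the annotation_line__write() based output
 * (the --stdio2 path); symbol__tty_annotate() keeps the older
 * symbol__annotate_printf() format.
 */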
int symbol__tty_annotate2(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct annotation_options *opts)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
struct hists *hists = evsel__hists(evsel);
char buf[1024];
if (symbol__annotate2(sym, map, evsel, opts, NULL) < 0)
return -1;
if (opts->print_lines) {
srcline_full_filename = opts->full_path;
symbol__calc_lines(sym, map, &source_line, opts);
print_summary(&source_line, dso->long_name);
}
hists__scnprintf_title(hists, buf, sizeof(buf));
fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
buf, percent_type_str(opts->percent_type), sym->name, dso->long_name);
symbol__annotate_fprintf2(sym, stdout, opts);
annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct annotation_options *opts)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
if (symbol__annotate(sym, map, evsel, 0, opts, NULL) < 0)
return -1;
symbol__calc_percent(sym, evsel);
if (opts->print_lines) {
srcline_full_filename = opts->full_path;
symbol__calc_lines(sym, map, &source_line, opts);
print_summary(&source_line, dso->long_name);
}
symbol__annotate_printf(sym, map, evsel, opts);
annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
bool ui__has_annotation(void)
{
return use_browser == 1 && perf_hpp_list.sym;
}
static double annotation_line__max_percent(struct annotation_line *al,
struct annotation *notes,
unsigned int percent_type)
{
double percent_max = 0.0;
int i;
for (i = 0; i < notes->nr_events; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
percent_type);
if (percent > percent_max)
percent_max = percent;
}
return percent_max;
}
static void disasm_line__write(struct disasm_line *dl, struct annotation *notes,
void *obj, char *bf, size_t size,
void (*obj__printf)(void *obj, const char *fmt, ...),
void (*obj__write_graph)(void *obj, int graph))
{
if (dl->ins.ops && dl->ins.ops->scnprintf) {
if (ins__is_jump(&dl->ins)) {
bool fwd;
if (dl->ops.target.outside)
goto call_like;
fwd = dl->ops.target.offset > dl->al.offset;
obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
obj__printf(obj, " ");
} else if (ins__is_call(&dl->ins)) {
call_like:
obj__write_graph(obj, RARROW_CHAR);
obj__printf(obj, " ");
} else if (ins__is_ret(&dl->ins)) {
obj__write_graph(obj, LARROW_CHAR);
obj__printf(obj, " ");
} else {
obj__printf(obj, " ");
}
} else {
obj__printf(obj, " ");
}
disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
}
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
bool first_line, bool current_entry, bool change_color, int width,
void *obj, unsigned int percent_type,
int (*obj__set_color)(void *obj, int color),
void (*obj__set_percent_color)(void *obj, double percent, bool current),
int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
void (*obj__printf)(void *obj, const char *fmt, ...),
void (*obj__write_graph)(void *obj, int graph))
{
double percent_max = annotation_line__max_percent(al, notes, percent_type);
int pcnt_width = annotation__pcnt_width(notes),
cycles_width = annotation__cycles_width(notes);
bool show_title = false;
char bf[256];
int printed;
if (first_line && (al->offset == -1 || percent_max == 0.0)) {
if (notes->have_cycles) {
if (al->ipc == 0.0 && al->cycles == 0)
show_title = true;
} else
show_title = true;
}
if (al->offset != -1 && percent_max != 0.0) {
int i;
for (i = 0; i < notes->nr_events; i++) {
double percent;
percent = annotation_data__percent(&al->data[i], percent_type);
obj__set_percent_color(obj, percent, current_entry);
if (notes->options->show_total_period) {
obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
} else if (notes->options->show_nr_samples) {
obj__printf(obj, "%6" PRIu64 " ",
al->data[i].he.nr_samples);
} else {
obj__printf(obj, "%6.2f ", percent);
}
}
} else {
obj__set_percent_color(obj, 0, current_entry);
if (!show_title)
obj__printf(obj, "%-*s", pcnt_width, " ");
else {
obj__printf(obj, "%-*s", pcnt_width,
notes->options->show_total_period ? "Period" :
notes->options->show_nr_samples ? "Samples" : "Percent");
}
}
if (notes->have_cycles) {
if (al->ipc)
obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->ipc);
else if (!show_title)
obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
else
obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
if (!notes->options->show_minmax_cycle) {
if (al->cycles)
obj__printf(obj, "%*" PRIu64 " ",
ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
else if (!show_title)
obj__printf(obj, "%*s",
ANNOTATION__CYCLES_WIDTH, " ");
else
obj__printf(obj, "%*s ",
ANNOTATION__CYCLES_WIDTH - 1,
"Cycle");
} else {
if (al->cycles) {
char str[32];
scnprintf(str, sizeof(str),
"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
al->cycles, al->cycles_min,
al->cycles_max);
obj__printf(obj, "%*s ",
ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
str);
} else if (!show_title)
obj__printf(obj, "%*s",
ANNOTATION__MINMAX_CYCLES_WIDTH,
" ");
else
obj__printf(obj, "%*s ",
ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
"Cycle(min/max)");
}
}
obj__printf(obj, " ");
if (!*al->line)
obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
else if (al->offset == -1) {
if (al->line_nr && notes->options->show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
else
printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " ");
obj__printf(obj, bf);
obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
} else {
u64 addr = al->offset;
int color = -1;
if (!notes->options->use_offset)
addr += notes->start;
if (!notes->options->use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
if (al->jump_sources &&
notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
if (notes->options->show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
notes->widths.jumps,
al->jump_sources);
prev = obj__set_jumps_percent_color(obj, al->jump_sources,
current_entry);
obj__printf(obj, bf);
obj__set_color(obj, prev);
}
print_addr:
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
notes->widths.target, addr);
} else if (ins__is_call(&disasm_line(al)->ins) &&
notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
goto print_addr;
} else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
goto print_addr;
} else {
printed = scnprintf(bf, sizeof(bf), "%-*s ",
notes->widths.addr, " ");
}
}
if (change_color)
color = obj__set_color(obj, HE_COLORSET_ADDR);
obj__printf(obj, bf);
if (change_color)
obj__set_color(obj, color);
disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph);
obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf);
}
}
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
struct annotation_write_ops *wops,
struct annotation_options *opts)
{
__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
wops->change_color, wops->width, wops->obj,
opts->percent_type,
wops->set_color, wops->set_percent_color,
wops->set_jumps_percent_color, wops->printf,
wops->write_graph);
}
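/*
 * Full annotation setup for the annotation_line__write() consumers:
 * disassemble, compute per-event percentages, then the offsets table,
 * jump targets, IPC and the column widths used when writing each line.
 */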
int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *evsel,
struct annotation_options *options, struct arch **parch)
{
struct annotation *notes = symbol__annotation(sym);
size_t size = symbol__size(sym);
int nr_pcnt = 1, err;
notes->offsets = zalloc(size * sizeof(struct annotation_line *));
if (notes->offsets == NULL)
return -1;
if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->nr_members;
err = symbol__annotate(sym, map, evsel, 0, options, parch);
if (err)
goto out_free_offsets;
notes->options = options;
symbol__calc_percent(sym, evsel);
notes->start = map__rip_2objdump(map, sym->start);
annotation__set_offsets(notes, size);
annotation__mark_jump_targets(notes, sym);
annotation__compute_ipc(notes, size);
annotation__init_column_widths(notes, sym);
notes->nr_events = nr_pcnt;
annotation__update_column_widths(notes);
return 0;
out_free_offsets:
zfree(&notes->offsets);
return -1;
}
#define ANNOTATION__CFG(n) \
{ .name = #n, .value = &annotation__default_options.n, }
/*
* Keep the entries sorted, they are bsearch'ed
*/
static struct annotation_config {
const char *name;
void *value;
} annotation__configs[] = {
ANNOTATION__CFG(hide_src_code),
ANNOTATION__CFG(jump_arrows),
ANNOTATION__CFG(offset_level),
ANNOTATION__CFG(show_linenr),
ANNOTATION__CFG(show_nr_jumps),
ANNOTATION__CFG(show_nr_samples),
ANNOTATION__CFG(show_total_period),
ANNOTATION__CFG(use_offset),
};
#undef ANNOTATION__CFG
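/*
 * These entries map to "annotate.<name>" keys in the perf config file,
 * e.g. (values here are just illustrative):
 *
 *	[annotate]
 *		hide_src_code = true
 *		offset_level = 2
 *
 * annotation__config() looks the suffix up in the sorted table above;
 * offset_level is clamped to the supported range, the rest are parsed
 * as booleans.
 */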
static int annotation_config__cmp(const void *name, const void *cfgp)
{
const struct annotation_config *cfg = cfgp;
return strcmp(name, cfg->name);
}
static int annotation__config(const char *var, const char *value,
void *data __maybe_unused)
{
struct annotation_config *cfg;
const char *name;
if (!strstarts(var, "annotate."))
return 0;
name = var + 9;
cfg = bsearch(name, annotation__configs, ARRAY_SIZE(annotation__configs),
sizeof(struct annotation_config), annotation_config__cmp);
if (cfg == NULL)
pr_debug("%s variable unknown, ignoring...", var);
else if (strcmp(var, "annotate.offset_level") == 0) {
perf_config_int(cfg->value, name, value);
if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL)
*(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL;
else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL)
*(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL;
} else {
*(bool *)cfg->value = perf_config_bool(name, value);
}
return 0;
}
void annotation_config__init(void)
{
perf_config(annotation__config, NULL);
annotation__default_options.show_total_period = symbol_conf.show_total_period;
annotation__default_options.show_nr_samples = symbol_conf.show_nr_samples;
}
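/*
 * Percent types are given as '-' separated pairs of "period"/"hits" and
 * "local"/"global", in either order, e.g. "local-period" or
 * "hits-global" (as accepted by the --percent-type option).
 */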
static unsigned int parse_percent_type(char *str1, char *str2)
{
unsigned int type = (unsigned int) -1;
if (!strcmp("period", str1)) {
if (!strcmp("local", str2))
type = PERCENT_PERIOD_LOCAL;
else if (!strcmp("global", str2))
type = PERCENT_PERIOD_GLOBAL;
}
if (!strcmp("hits", str1)) {
if (!strcmp("local", str2))
type = PERCENT_HITS_LOCAL;
else if (!strcmp("global", str2))
type = PERCENT_HITS_GLOBAL;
}
return type;
}
int annotate_parse_percent_type(const struct option *opt, const char *_str,
int unset __maybe_unused)
{
struct annotation_options *opts = opt->value;
unsigned int type;
char *str1, *str2;
int err = -1;
str1 = strdup(_str);
if (!str1)
return -ENOMEM;
str2 = strchr(str1, '-');
if (!str2)
goto out;
*str2++ = 0;
type = parse_percent_type(str1, str2);
if (type == (unsigned int) -1)
type = parse_percent_type(str2, str1);
if (type != (unsigned int) -1) {
opts->percent_type = type;
err = 0;
}
out:
free(str1);
return err;
}