linux_dsm_epyc7002/tools/perf/util/env.c
Jan Stancek da8a58b56c perf tools: Replace _SC_NPROCESSORS_CONF with max_present_cpu in cpu_topology_map
There are 2 problems wrt. cpu_topology_map on systems with sparse CPUs:

1. offline/absent CPUs will have their socket_id and core_id set to -1
   which triggers:
   "socket_id number is too big.You may need to upgrade the perf tool."

2. cpu_topology_map (perf_env.cpu[]) is allocated based on
   _SC_NPROCESSORS_CONF, but its users index it by CPU id, which on a
   sparse system can go above that size (see the sketch after the numactl
   output below). This can lead to reads beyond what was allocated:
   ==19991== Invalid read of size 4
   ==19991==    at 0x490CEB: check_cpu_topology (topology.c:69)
   ==19991==    by 0x490CEB: test_session_topology (topology.c:106)
   ...

For example:
  _SC_NPROCESSORS_CONF == 16
  available: 2 nodes (0-1)
  node 0 cpus: 0 6 8 10 16 22 24 26
  node 0 size: 12004 MB
  node 0 free: 9470 MB
  node 1 cpus: 1 7 9 11 23 25 27
  node 1 size: 12093 MB
  node 1 free: 9406 MB
  node distances:
  node   0   1
    0:  10  20
    1:  20  10
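
To make the mismatch concrete, here is a minimal standalone sketch (not part
of this patch) that compares sysconf(_SC_NPROCESSORS_CONF) with the highest
CPU id listed in /sys/devices/system/cpu/present; once that id reaches the
sysconf() value, an array sized by _SC_NPROCESSORS_CONF cannot safely be
indexed by raw CPU id:

  /* Standalone illustration only; not perf code. */
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          char buf[256], *p;
          long nr_conf = sysconf(_SC_NPROCESSORS_CONF);
          int max_present = -1;
          FILE *f = fopen("/sys/devices/system/cpu/present", "r");

          if (f && fgets(buf, sizeof(buf), f)) {
                  /* "present" looks like "0-5,8,10-27"; the last number
                   * is the highest present CPU id. */
                  p = strrchr(buf, '-');
                  if (!p)
                          p = strrchr(buf, ',');
                  sscanf(p ? p + 1 : buf, "%d", &max_present);
          }
          if (f)
                  fclose(f);

          printf("_SC_NPROCESSORS_CONF=%ld, max present CPU id=%d\n",
                 nr_conf, max_present);
          /* e.g. 16 configured CPUs vs. CPU ids up to 27 above: indexing a
           * 16-slot array with id 27 reads past the allocation. */
          return 0;
  }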

This patch changes HEADER_NRCPUS.nr_cpus_available from _SC_NPROCESSORS_CONF
to max_present_cpu and updates all users of cpu_topology_map to iterate
up to nr_cpus_avail.

As a consequence, the HEADER_CPU_TOPOLOGY core_id and socket_id lists get
longer, but they remain compatible with the pre-patch state: the index into
cpu_topology_map is still the CPU id.
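
As an illustration only (the function name is invented; the struct and field
names follow struct perf_env and struct cpu_topology_map in util/env.h), a
consumer of the map is then expected to iterate up to nr_cpus_avail and skip
the -1 entries left for offline/absent CPUs:

  /* Illustration only, not part of this patch. */
  #include <stdio.h>
  #include "env.h"

  static void print_topology(struct perf_env *env)
  {
          int cpu;

          for (cpu = 0; cpu < env->nr_cpus_avail; cpu++) {
                  /* offline/absent CPUs keep core_id/socket_id == -1 */
                  if (env->cpu[cpu].socket_id < 0)
                          continue;
                  printf("CPU %d, core %d, socket %d\n", cpu,
                         env->cpu[cpu].core_id, env->cpu[cpu].socket_id);
          }
  }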

  perf test 36 -v
  36: Session topology                           :
  --- start ---
  test child forked, pid 22211
  templ file: /tmp/perf-test-gmdX5i
  CPU 0, core 0, socket 0
  CPU 1, core 0, socket 1
  CPU 6, core 10, socket 0
  CPU 7, core 10, socket 1
  CPU 8, core 1, socket 0
  CPU 9, core 1, socket 1
  CPU 10, core 9, socket 0
  CPU 11, core 9, socket 1
  CPU 16, core 0, socket 0
  CPU 22, core 10, socket 0
  CPU 23, core 10, socket 1
  CPU 24, core 1, socket 0
  CPU 25, core 1, socket 1
  CPU 26, core 9, socket 0
  CPU 27, core 9, socket 1
  test child finished with 0
  ---- end ----
  Session topology: Ok

Signed-off-by: Jan Stancek <jstancek@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/d7c05c6445fca74a8442c2c73cfffd349c52c44f.1487146877.git.jstancek@redhat.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-02-17 12:56:35 -03:00


#include "cpumap.h"
#include "env.h"
#include "util.h"
struct perf_env perf_env;
void perf_env__exit(struct perf_env *env)
{
int i;
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
zfree(&env->arch);
zfree(&env->cpu_desc);
zfree(&env->cpuid);
zfree(&env->cmdline);
zfree(&env->cmdline_argv);
zfree(&env->sibling_cores);
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);
zfree(&env->cpu);
for (i = 0; i < env->nr_numa_nodes; i++)
cpu_map__put(env->numa_nodes[i].map);
zfree(&env->numa_nodes);
for (i = 0; i < env->caches_cnt; i++)
cpu_cache_level__free(&env->caches[i]);
zfree(&env->caches);
}
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
        int i;

        /* do not include NULL termination */
        env->cmdline_argv = calloc(argc, sizeof(char *));
        if (env->cmdline_argv == NULL)
                goto out_enomem;

        /*
         * Must copy argv contents because it gets moved around during option
         * parsing:
         */
        for (i = 0; i < argc ; i++) {
                env->cmdline_argv[i] = argv[i];
                if (env->cmdline_argv[i] == NULL)
                        goto out_free;
        }

        env->nr_cmdline = argc;

        return 0;
out_free:
        zfree(&env->cmdline_argv);
out_enomem:
        return -ENOMEM;
}

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
        int cpu, nr_cpus;

        if (env->cpu != NULL)
                return 0;

        /* size the map so that every present CPU id is a valid index */
        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu();

        nr_cpus = env->nr_cpus_avail;
        if (nr_cpus == -1)
                return -EINVAL;

        env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
        if (env->cpu == NULL)
                return -ENOMEM;

        for (cpu = 0; cpu < nr_cpus; ++cpu) {
                env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
                env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
        }

        env->nr_cpus_avail = nr_cpus;
        return 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
        free(cache->type);
        free(cache->map);
        free(cache->size);
}