perf tools: Use perf_cpu_map__nr instead of cpu_map__nr
author Jiri Olsa <jolsa@kernel.org>
Thu, 22 Aug 2019 11:11:38 +0000 (13:11 +0200)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 22 Aug 2019 14:14:54 +0000 (11:14 -0300)
Switch the rest of the perf code to use libperf's perf_cpu_map__nr(),
which behaves the same as the current cpu_map__nr(), and remove the
cpu_map__nr() function.

Link: http://lkml.kernel.org/n/tip-6e0guy75clis7nm0xpuz9fga@git.kernel.org
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190822111141.25823-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
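
For context, here is a minimal, self-contained sketch of the libperf API this
commit switches to. It assumes the libperf header <perf/cpumap.h> and the
library itself are available to build and link against; the main() wrapper and
the printed strings are purely illustrative and are not part of the commit.

#include <stdio.h>
#include <perf/cpumap.h>	/* libperf: struct perf_cpu_map and its accessors */

int main(void)
{
	/* A NULL cpu_list asks libperf for a map of all online CPUs */
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

	/*
	 * perf_cpu_map__nr() is the accessor the call sites below are
	 * switched to; like the removed cpu_map__nr(), a NULL map is
	 * treated as holding a single (dummy) CPU.
	 */
	printf("nr_cpus = %d\n", perf_cpu_map__nr(cpus));

	perf_cpu_map__put(cpus);	/* drop our reference */
	return 0;
}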
tools/perf/arch/arm/util/cs-etm.c
tools/perf/util/cpumap.h
tools/perf/util/evlist.c
tools/perf/util/mmap.c
tools/perf/util/stat-display.c

tools/perf/arch/arm/util/cs-etm.c
index 5cb07e8cb296b49139268875163e9de82fabb514..c786ab095d15e85723d49e7e69c9a43fea54df74 100644
@@ -653,7 +653,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
                cpu_map = online_cpus;
        } else {
                /* Make sure all specified CPUs are online */
-               for (i = 0; i < cpu_map__nr(event_cpus); i++) {
+               for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
                        if (cpu_map__has(event_cpus, i) &&
                            !cpu_map__has(online_cpus, i))
                                return -EINVAL;
@@ -662,7 +662,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
                cpu_map = event_cpus;
        }
 
-       nr_cpu = cpu_map__nr(cpu_map);
+       nr_cpu = perf_cpu_map__nr(cpu_map);
        /* Get PMU type as dynamically assigned by the core */
        type = cs_etm_pmu->type;
 
tools/perf/util/cpumap.h
index a3d27f4131be96cbd871886c145a01425bf38735..77f85e9c88d42b3a0df0595467ef4aa743be7ce5 100644
@@ -49,11 +49,6 @@ static inline int cpu_map__id_to_cpu(int id)
        return id & 0xffff;
 }
 
-static inline int cpu_map__nr(const struct perf_cpu_map *map)
-{
-       return map ? map->nr : 1;
-}
-
 static inline bool cpu_map__empty(const struct perf_cpu_map *map)
 {
        return map ? map->map[0] == -1 : true;
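
The inline helper removed above returned "map ? map->nr : 1"; the libperf
replacement is expected to keep those exact semantics (a NULL map counts as
one CPU), which is why every call site in this patch is a 1:1 substitution.
A rough, illustrative sketch of the libperf definition, not part of this diff:

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	/* Same NULL handling as the removed tools/perf inline helper */
	return cpus ? cpus->nr : 1;
}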
tools/perf/util/evlist.c
index c4489a1ad6bc89b4778bbf3709f7a24aa3230459..15d1046014d78eee3ad1a355f6e0ac0da970b242 100644
@@ -370,7 +370,7 @@ static int perf_evlist__enable_event_thread(struct evlist *evlist,
                                            int thread)
 {
        int cpu;
-       int nr_cpus = cpu_map__nr(evlist->core.cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 
        if (!evsel->core.fd)
                return -EINVAL;
@@ -396,7 +396,7 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
 
 int perf_evlist__alloc_pollfd(struct evlist *evlist)
 {
-       int nr_cpus = cpu_map__nr(evlist->core.cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
        int nr_threads = thread_map__nr(evlist->core.threads);
        int nfds = 0;
        struct evsel *evsel;
@@ -692,7 +692,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
        int i;
        struct perf_mmap *map;
 
-       evlist->nr_mmaps = cpu_map__nr(evlist->core.cpus);
+       evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
        if (cpu_map__empty(evlist->core.cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->core.threads);
        map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
@@ -807,7 +807,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
                                     struct mmap_params *mp)
 {
        int cpu, thread;
-       int nr_cpus = cpu_map__nr(evlist->core.cpus);
+       int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
        int nr_threads = thread_map__nr(evlist->core.threads);
 
        pr_debug2("perf event ring buffer mmapped per cpu\n");
@@ -1014,7 +1014,7 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
        evlist__for_each_entry(evlist, evsel) {
                if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
-                   perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
+                   perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }
 
tools/perf/util/mmap.c
index 42a5971146aed7d685e367a291d2eab7772e9a22..5f3532e51ec99d2b50bd249b4a3bef4d2f9b553e 100644
@@ -331,7 +331,7 @@ static void build_node_mask(int node, cpu_set_t *mask)
        if (!cpu_map)
                return;
 
-       nr_cpus = cpu_map__nr(cpu_map);
+       nr_cpus = perf_cpu_map__nr(cpu_map);
        for (c = 0; c < nr_cpus; c++) {
                cpu = cpu_map->map[c]; /* map c index to online cpu index */
                if (cpu__get_node(cpu) == node)
tools/perf/util/stat-display.c
index f7b39f4bc51e31bba2945ab408e552bd822b1d06..3df0e39ccd522826e3b5feecc21a0edfdc57694d 100644
@@ -745,7 +745,7 @@ static void print_aggr_thread(struct perf_stat_config *config,
 {
        FILE *output = config->output;
        int nthreads = thread_map__nr(counter->core.threads);
-       int ncpus = cpu_map__nr(counter->core.cpus);
+       int ncpus = perf_cpu_map__nr(counter->core.cpus);
        int thread, sorted_threads, id;
        struct perf_aggr_thread_value *buf;