struct evsel *evsel, u32 option)
{
int i, err = -EINVAL;
- struct perf_cpu_map *event_cpus = evsel->evlist->cpus;
+ struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* Set option of each CPU we have */
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct evsel *evsel, *cs_etm_evsel = NULL;
- struct perf_cpu_map *cpus = evlist->cpus;
+ struct perf_cpu_map *cpus = evlist->core.cpus;
bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
int err = 0;
{
int i;
int etmv3 = 0, etmv4 = 0;
- struct perf_cpu_map *event_cpus = evlist->cpus;
+ struct perf_cpu_map *event_cpus = evlist->core.cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* cpu map is not empty, we have specific CPUs to work with */
u32 offset;
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
- struct perf_cpu_map *event_cpus = session->evlist->cpus;
+ struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
container_of(itr, struct intel_bts_recording, itr);
struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
struct evsel *evsel, *intel_bts_evsel = NULL;
- const struct perf_cpu_map *cpus = evlist->cpus;
+ const struct perf_cpu_map *cpus = evlist->core.cpus;
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
btsr->evlist = evlist;
ui__warning("Intel Processor Trace: TSC not available\n");
}
- per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);
+ per_cpu_mmaps = !cpu_map__empty(session->evlist->core.cpus);
auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
bool have_timing_info, need_immediate = false;
struct evsel *evsel, *intel_pt_evsel = NULL;
- const struct perf_cpu_map *cpus = evlist->cpus;
+ const struct perf_cpu_map *cpus = evlist->core.cpus;
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
u64 tsc_bit;
int err;
static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
- struct perf_cpu_map *cpumap = ftrace->evlist->cpus;
+ struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
if (!target__has_cpu(&ftrace->target))
return 0;
return err;
}
- err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize cpu map.\n");
switch (stat_config.aggr_mode) {
case AGGR_SOCKET:
- if (cpu_map__build_socket_map(evsel_list->cpus, &stat_config.aggr_map)) {
+ if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build socket map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_socket_cached;
break;
case AGGR_DIE:
- if (cpu_map__build_die_map(evsel_list->cpus, &stat_config.aggr_map)) {
+ if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build die map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_die_cached;
break;
case AGGR_CORE:
- if (cpu_map__build_core_map(evsel_list->cpus, &stat_config.aggr_map)) {
+ if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
return -1;
}
break;
case AGGR_NONE:
if (term_percore_set()) {
- if (cpu_map__build_core_map(evsel_list->cpus,
+ if (cpu_map__build_core_map(evsel_list->core.cpus,
&stat_config.aggr_map)) {
perror("cannot build core map");
return -1;
* taking the highest cpu number to be the size of
* the aggregation translate cpumap.
*/
- nr = cpu_map__get_max(evsel_list->cpus);
+ nr = cpu_map__get_max(evsel_list->core.cpus);
stat_config.cpus_aggr_map = cpu_map__empty_new(nr + 1);
return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
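A worked example with a hypothetical CPU list (not taken from the patch) shows why the translate map is sized nr + 1:

/* Suppose evsel_list->core.cpus holds CPUs { 0, 4, 15 } (hypothetical values):
 *   cpu_map__get_max()      -> 15, the highest CPU number present
 *   cpu_map__empty_new(16)  -> a 16-entry translate map, so every monitored
 *                              CPU number can index it directly even though
 *                              only three CPUs are actually in the map.
 */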
switch (stat_config.aggr_mode) {
case AGGR_SOCKET:
- if (perf_env__build_socket_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
+ if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build socket map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_socket_file;
break;
case AGGR_DIE:
- if (perf_env__build_die_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
+ if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build die map");
return -1;
}
stat_config.aggr_get_id = perf_stat__get_die_file;
break;
case AGGR_CORE:
- if (perf_env__build_core_map(env, evsel_list->cpus, &stat_config.aggr_map)) {
+ if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
perror("cannot build core map");
return -1;
}
evlist__for_each_entry(evlist, counter) {
try_again:
- if (evsel__open(counter, top->evlist->cpus,
+ if (evsel__open(counter, top->evlist->core.cpus,
top->evlist->threads) < 0) {
/*
#ifndef __LIBPERF_INTERNAL_EVLIST_H
#define __LIBPERF_INTERNAL_EVLIST_H
+struct perf_cpu_map;
+
struct perf_evlist {
struct list_head entries;
int nr_entries;
bool has_user_cpus;
+ struct perf_cpu_map *cpus;
};
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
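For orientation, a minimal sketch of the layout this change relies on: perf's tool-side struct evlist embeds the libperf struct perf_evlist as its core member, which is why every former evlist->cpus access above and below becomes evlist->core.cpus. Only core, cpus, threads and the list members come from the patch; the rest is elided, and the accessor at the end is hypothetical.

struct perf_evlist {			/* libperf: lib/include/internal/evlist.h */
	struct list_head entries;
	int nr_entries;
	bool has_user_cpus;
	struct perf_cpu_map *cpus;	/* moved in from perf's struct evlist */
};

struct evlist {				/* perf tool side: util/evlist.h */
	struct perf_evlist core;	/* embedded libperf state */
	struct perf_thread_map *threads;
	/* ... tool-only members (mmap, selected, stats, env, ...) ... */
};

/* Hypothetical accessor, only to show the new access path: */
static inline struct perf_cpu_map *evlist_cpus(struct evlist *evlist)
{
	return evlist->core.cpus;
}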
mp->idx = idx;
if (per_cpu) {
- mp->cpu = evlist->cpus->map[idx];
+ mp->cpu = evlist->core.cpus->map[idx];
if (evlist->threads)
mp->tid = thread_map__pid(evlist->threads, 0);
else
perf_evlist__munmap(evlist);
evlist__close(evlist);
- perf_cpu_map__put(evlist->cpus);
+ perf_cpu_map__put(evlist->core.cpus);
perf_thread_map__put(evlist->threads);
- evlist->cpus = NULL;
+ evlist->core.cpus = NULL;
evlist->threads = NULL;
perf_evlist__purge(evlist);
perf_evlist__exit(evlist);
*/
if (!evsel->core.own_cpus || evlist->core.has_user_cpus) {
perf_cpu_map__put(evsel->core.cpus);
- evsel->core.cpus = perf_cpu_map__get(evlist->cpus);
+ evsel->core.cpus = perf_cpu_map__get(evlist->core.cpus);
} else if (evsel->core.cpus != evsel->core.own_cpus) {
perf_cpu_map__put(evsel->core.cpus);
evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
int thread)
{
int cpu;
- int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_cpus = cpu_map__nr(evlist->core.cpus);
if (!evsel->fd)
return -EINVAL;
int perf_evlist__enable_event_idx(struct evlist *evlist,
struct evsel *evsel, int idx)
{
- bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
+ bool per_cpu_mmaps = !cpu_map__empty(evlist->core.cpus);
if (per_cpu_mmaps)
return perf_evlist__enable_event_cpu(evlist, evsel, idx);
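The per_cpu_mmaps / cpu_map__empty() tests above and below rely on one convention; a minimal sketch (the helper name is hypothetical, the test is the one the patch uses):

/* A per-thread evlist carries the "dummy" cpu map whose single entry is -1;
 * a per-CPU evlist holds real CPU numbers. cpu_map__empty() is true only for
 * the former, which selects the thread-indexed rather than cpu-indexed path. */
static bool evlist_is_per_cpu(const struct perf_cpu_map *cpus)	/* hypothetical */
{
	return !(cpus == NULL || cpus->map[0] == -1);	/* i.e. !cpu_map__empty(cpus) */
}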
int perf_evlist__alloc_pollfd(struct evlist *evlist)
{
- int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_cpus = cpu_map__nr(evlist->core.cpus);
int nr_threads = thread_map__nr(evlist->threads);
int nfds = 0;
struct evsel *evsel;
{
struct perf_sample_id *sid = SID(evsel, cpu, thread);
sid->idx = idx;
- if (evlist->cpus && cpu >= 0)
- sid->cpu = evlist->cpus->map[cpu];
+ if (evlist->core.cpus && cpu >= 0)
+ sid->cpu = evlist->core.cpus->map[cpu];
else
sid->cpu = -1;
if (!evsel->system_wide && evlist->threads && thread >= 0)
int i;
struct perf_mmap *map;
- evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
- if (cpu_map__empty(evlist->cpus))
+ evlist->nr_mmaps = cpu_map__nr(evlist->core.cpus);
+ if (cpu_map__empty(evlist->core.cpus))
evlist->nr_mmaps = thread_map__nr(evlist->threads);
map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
if (!map)
{
struct evsel *evsel;
int revent;
- int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);
+ int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);
evlist__for_each_entry(evlist, evsel) {
struct perf_mmap *maps = evlist->mmap;
struct mmap_params *mp)
{
int cpu, thread;
- int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_cpus = cpu_map__nr(evlist->core.cpus);
int nr_threads = thread_map__nr(evlist->threads);
pr_debug2("perf event ring buffer mmapped per cpu\n");
int comp_level)
{
struct evsel *evsel;
- const struct perf_cpu_map *cpus = evlist->cpus;
+ const struct perf_cpu_map *cpus = evlist->core.cpus;
const struct perf_thread_map *threads = evlist->threads;
/*
* Delay setting mp.prot: set it before calling perf_mmap__mmap.
* original reference count of 1. If that is not the case it is up to
* the caller to increase the reference count.
*/
- if (cpus != evlist->cpus) {
- perf_cpu_map__put(evlist->cpus);
- evlist->cpus = perf_cpu_map__get(cpus);
+ if (cpus != evlist->core.cpus) {
+ perf_cpu_map__put(evlist->core.cpus);
+ evlist->core.cpus = perf_cpu_map__get(cpus);
}
if (threads != evlist->threads) {
* Default: one fd per CPU, all threads, aka systemwide
* as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
*/
- if (evlist->threads == NULL && evlist->cpus == NULL) {
+ if (evlist->threads == NULL && evlist->core.cpus == NULL) {
err = perf_evlist__create_syswide_maps(evlist);
if (err < 0)
goto out_err;
goto out_delete_evlist;
evlist__for_each_entry(evlist, counter) {
- if (evsel__open(counter, evlist->cpus,
+ if (evsel__open(counter, evlist->core.cpus,
evlist->threads) < 0)
goto out_delete_evlist;
}
struct perf_mmap *mmap;
struct perf_mmap *overwrite_mmap;
struct perf_thread_map *threads;
- struct perf_cpu_map *cpus;
struct evsel *selected;
struct events_stats stats;
struct perf_env *env;
if (opts->group)
perf_evlist__set_leader(evlist);
- if (evlist->cpus->map[0] < 0)
+ if (evlist->core.cpus->map[0] < 0)
opts->no_inherit = true;
use_comm_exec = perf_can_comm_exec();
evsel = perf_evlist__last(temp_evlist);
- if (!evlist || cpu_map__empty(evlist->cpus)) {
+ if (!evlist || cpu_map__empty(evlist->core.cpus)) {
struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
cpu = cpus ? cpus->map[0] : 0;
perf_cpu_map__put(cpus);
} else {
- cpu = evlist->cpus->map[0];
+ cpu = evlist->core.cpus->map[0];
}
while (1) {
for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) {
int cpu2 = evsel__cpus(evsel)->map[i];
- if (config->aggr_get_id(config, evlist->cpus, cpu2) == id)
+ if (config->aggr_get_id(config, evlist->core.cpus, cpu2) == id)
return cpu2;
}
return 0;
evlist__for_each_entry(evlist, counter) {
val = 0;
for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
- s2 = config->aggr_get_id(config, evlist->cpus, cpu);
+ s2 = config->aggr_get_id(config, evlist->core.cpus, cpu);
if (s2 != id)
continue;
val += perf_counts(counter->counts, cpu, 0)->val;
u64 ena, run, val;
double uval;
- nrcpus = evlist->cpus->nr;
+ nrcpus = evlist->core.cpus->nr;
for (cpu = 0; cpu < nrcpus; cpu++) {
bool first = true;
return err;
}
- err = perf_event__synthesize_cpu_map(tool, evlist->cpus,
+ err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus,
process, NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
if (target->cpu_list)
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
- top->evlist->cpus->nr > 1 ? "s" : "",
+ top->evlist->core.cpus->nr > 1 ? "s" : "",
target->cpu_list);
else {
if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, ")");
else
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
- top->evlist->cpus->nr,
- top->evlist->cpus->nr > 1 ? "s" : "");
+ top->evlist->core.cpus->nr,
+ top->evlist->core.cpus->nr > 1 ? "s" : "");
}
perf_top__reset_sample_counters(top);