struct perf_evsel *evsel, u32 option)
{
int i, err = -EINVAL;
- struct cpu_map *event_cpus = evsel->evlist->cpus;
- struct cpu_map *online_cpus = cpu_map__new(NULL);
+ struct perf_cpu_map *event_cpus = evsel->evlist->cpus;
+ struct perf_cpu_map *online_cpus = cpu_map__new(NULL);
/* Set option of each CPU we have */
for (i = 0; i < cpu__max_cpu(); i++) {
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct perf_evsel *evsel, *cs_etm_evsel = NULL;
- struct cpu_map *cpus = evlist->cpus;
+ struct perf_cpu_map *cpus = evlist->cpus;
bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);
int err = 0;
{
int i;
int etmv3 = 0, etmv4 = 0;
- struct cpu_map *event_cpus = evlist->cpus;
- struct cpu_map *online_cpus = cpu_map__new(NULL);
+ struct perf_cpu_map *event_cpus = evlist->cpus;
+ struct perf_cpu_map *online_cpus = cpu_map__new(NULL);
/* cpu map is not empty, we have specific CPUs to work with */
if (!cpu_map__empty(event_cpus)) {
int i;
u32 offset;
u64 nr_cpu, type;
- struct cpu_map *cpu_map;
- struct cpu_map *event_cpus = session->evlist->cpus;
- struct cpu_map *online_cpus = cpu_map__new(NULL);
+ struct perf_cpu_map *cpu_map;
+ struct perf_cpu_map *event_cpus = session->evlist->cpus;
+ struct perf_cpu_map *online_cpus = cpu_map__new(NULL);
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
const char *sysfs = sysfs__mountpoint();
int cpu;
u64 midr = 0;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
FILE *file;
if (!sysfs || !pmu || !pmu->cpus)
.sample_time = true,
};
struct thread_map *threads = NULL;
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
struct perf_evlist *evlist = NULL;
struct perf_evsel *evsel = NULL;
int err = -1, ret, i;
container_of(itr, struct intel_bts_recording, itr);
struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
struct perf_evsel *evsel, *intel_bts_evsel = NULL;
- const struct cpu_map *cpus = evlist->cpus;
+ const struct perf_cpu_map *cpus = evlist->cpus;
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
btsr->evlist = evlist;
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
bool have_timing_info, need_immediate = false;
struct perf_evsel *evsel, *intel_pt_evsel = NULL;
- const struct cpu_map *cpus = evlist->cpus;
+ const struct perf_cpu_map *cpus = evlist->cpus;
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
u64 tsc_bit;
int err;
}
}
-static int do_threads(struct worker *worker, struct cpu_map *cpu)
+static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
int j, ret = 0;
struct sigaction act;
struct worker *worker = NULL;
- struct cpu_map *cpu;
+ struct perf_cpu_map *cpu;
struct rlimit rl, prevrl;
unsigned int i;
(int) runtime.tv_sec);
}
-static int do_threads(struct worker *worker, struct cpu_map *cpu)
+static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
struct sigaction act;
unsigned int i;
struct worker *worker = NULL;
- struct cpu_map *cpu;
+ struct perf_cpu_map *cpu;
pthread_t wthread;
struct rlimit rl, prevrl;
unsigned int i;
pthread_attr_t thread_attr;
struct worker *worker = NULL;
- struct cpu_map *cpu;
+ struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
if (argc) {
}
static void create_threads(struct worker *w, pthread_attr_t thread_attr,
- struct cpu_map *cpu)
+ struct perf_cpu_map *cpu)
{
cpu_set_t cpuset;
unsigned int i;
unsigned int i;
struct sigaction act;
pthread_attr_t thread_attr;
- struct cpu_map *cpu;
+ struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
if (argc)
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr, struct cpu_map *cpu)
+ pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
{
cpu_set_t cpuset;
unsigned int i;
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
- struct cpu_map *cpu;
+ struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
if (argc)
}
static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
- struct cpu_map *cpu)
+ struct perf_cpu_map *cpu)
{
cpu_set_t cpuset;
unsigned int i;
struct sigaction act;
pthread_attr_t thread_attr;
struct thread_data *waking_worker;
- struct cpu_map *cpu;
+ struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options,
bench_futex_wake_parallel_usage, 0);
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr, struct cpu_map *cpu)
+ pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
{
cpu_set_t cpuset;
unsigned int i;
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
- struct cpu_map *cpu;
+ struct perf_cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
if (argc) {
c2c.cpu2node = cpu2node;
for (node = 0; node < c2c.nodes_cnt; node++) {
- struct cpu_map *map = n[node].map;
+ struct perf_cpu_map *map = n[node].map;
unsigned long *set;
set = bitmap_alloc(c2c.cpus_cnt);
return 0;
}
-static int set_tracing_cpumask(struct cpu_map *cpumap)
+static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
{
char *cpumask;
size_t mask_size;
static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
- struct cpu_map *cpumap = ftrace->evlist->cpus;
+ struct perf_cpu_map *cpumap = ftrace->evlist->cpus;
if (!target__has_cpu(&ftrace->target))
return 0;
static int reset_tracing_cpu(void)
{
- struct cpu_map *cpumap = cpu_map__new(NULL);
+ struct perf_cpu_map *cpumap = cpu_map__new(NULL);
int ret;
ret = set_tracing_cpumask(cpumap);
bool comp;
struct thread_map *color_pids;
const char *color_pids_str;
- struct cpu_map *color_cpus;
+ struct perf_cpu_map *color_cpus;
const char *color_cpus_str;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
const char *cpus_str;
};
static int setup_map_cpus(struct perf_sched *sched)
{
- struct cpu_map *map;
+ struct perf_cpu_map *map;
sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
static int setup_color_cpus(struct perf_sched *sched)
{
- struct cpu_map *map;
+ struct perf_cpu_map *map;
if (!sched->map.color_cpus_str)
return 0;
bool show_bpf_events;
bool allocated;
bool per_event_dump;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct thread_map *threads;
int name_width;
const char *time_str;
u64 bytes_written;
struct perf_tool tool;
bool maps_allocated;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct thread_map *threads;
enum aggr_mode aggr_mode;
};
};
static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
- struct cpu_map *map, int cpu)
+ struct perf_cpu_map *map, int cpu)
{
return cpu_map__get_socket(map, cpu, NULL);
}
static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
- struct cpu_map *map, int cpu)
+ struct perf_cpu_map *map, int cpu)
{
return cpu_map__get_die(map, cpu, NULL);
}
static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
- struct cpu_map *map, int cpu)
+ struct perf_cpu_map *map, int cpu)
{
return cpu_map__get_core(map, cpu, NULL);
}
-static int cpu_map__get_max(struct cpu_map *map)
+static int cpu_map__get_max(struct perf_cpu_map *map)
{
int i, max = -1;
}
static int perf_stat__get_aggr(struct perf_stat_config *config,
- aggr_get_id_t get_id, struct cpu_map *map, int idx)
+ aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
{
int cpu;
}
static int perf_stat__get_socket_cached(struct perf_stat_config *config,
- struct cpu_map *map, int idx)
+ struct perf_cpu_map *map, int idx)
{
return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
}
static int perf_stat__get_die_cached(struct perf_stat_config *config,
- struct cpu_map *map, int idx)
+ struct perf_cpu_map *map, int idx)
{
return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
}
static int perf_stat__get_core_cached(struct perf_stat_config *config,
- struct cpu_map *map, int idx)
+ struct perf_cpu_map *map, int idx)
{
return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
}
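/*
 * Editor's aside (a sketch, not part of this patch): the cached getters
 * above are meant to be installed as the aggr_get_id callback, so that
 * repeated per-CPU lookups are answered from cpus_aggr_map rather than
 * recomputed each time. The helper below is hypothetical, for
 * illustration only.
 */
static void install_socket_aggr_example(struct perf_stat_config *config)
{
	config->aggr_get_id = perf_stat__get_socket_cached;
}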
stat_config.cpus_aggr_map = NULL;
}
-static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
+static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
{
int cpu;
return cpu;
}
-static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
+static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data)
{
struct perf_env *env = data;
int cpu = perf_env__get_cpu(env, map, idx);
return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
}
-static int perf_env__get_die(struct cpu_map *map, int idx, void *data)
+static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
{
struct perf_env *env = data;
int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
return die_id;
}
-static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
+static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
{
struct perf_env *env = data;
int core = -1, cpu = perf_env__get_cpu(env, map, idx);
return core;
}
-static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
- struct cpu_map **sockp)
+static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
+ struct perf_cpu_map **sockp)
{
return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
}
-static int perf_env__build_die_map(struct perf_env *env, struct cpu_map *cpus,
- struct cpu_map **diep)
+static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
+ struct perf_cpu_map **diep)
{
return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
}
-static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
- struct cpu_map **corep)
+static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
+ struct perf_cpu_map **corep)
{
return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}
static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
- struct cpu_map *map, int idx)
+ struct perf_cpu_map *map, int idx)
{
return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
}
static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
- struct cpu_map *map, int idx)
+ struct perf_cpu_map *map, int idx)
{
return perf_env__get_die(map, idx, &perf_stat.session->header.env);
}
static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
- struct cpu_map *map, int idx)
+ struct perf_cpu_map *map, int idx)
{
return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}
{
struct perf_tool *tool = session->tool;
struct perf_stat *st = container_of(tool, struct perf_stat, tool);
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
if (st->cpus) {
pr_warning("Extra cpu map event, ignoring.\n");
static unsigned long *get_bitmap(const char *str, int nbits)
{
- struct cpu_map *map = cpu_map__new(str);
+ struct perf_cpu_map *map = cpu_map__new(str);
unsigned long *bm = NULL;
int i;
.done_cnt = 0,
};
struct thread_map *threads = NULL;
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
struct perf_evlist *evlist = NULL;
struct perf_evsel *evsel = NULL;
int err = -1, ret;
struct cpu_map_event *map_event = &event->cpu_map;
struct cpu_map_mask *mask;
struct cpu_map_data *data;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
int i;
data = &map_event->data;
struct cpu_map_event *map_event = &event->cpu_map;
struct cpu_map_entries *cpus;
struct cpu_map_data *data;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
data = &map_event->data;
int test__cpu_map_synthesize(struct test *test __maybe_unused, int subtest __maybe_unused)
{
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
/* This one is better stored in a mask. */
cpus = cpu_map__new("0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19");
static int cpu_map_print(const char *str)
{
- struct cpu_map *map = cpu_map__new(str);
+ struct perf_cpu_map *map = cpu_map__new(str);
char buf[100];
if (!map)
static int attach__cpu_disabled(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__last(evlist);
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
int err;
pr_debug("attaching to CPU 0 as enabled\n");
static int attach__cpu_enabled(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__last(evlist);
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
int err;
pr_debug("attaching to CPU 0 as enabled\n");
{
struct event_update_event *ev = (struct event_update_event*) event;
struct event_update_event_cpus *ev_data;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
ev_data = (struct event_update_event_cpus*) ev->data;
},
};
struct thread_map *threads = NULL;
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
struct perf_evlist *evlist = NULL;
struct perf_evsel *evsel = NULL;
int found, err = -1;
static unsigned long *get_bitmap(const char *str, int nbits)
{
- struct cpu_map *map = cpu_map__new(str);
+ struct perf_cpu_map *map = cpu_map__new(str);
unsigned long *bm = NULL;
int i;
int err = -1;
union perf_event *event;
struct thread_map *threads;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct perf_evlist *evlist;
cpu_set_t cpu_set;
const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
int err = -1, fd, cpu;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct perf_evsel *evsel;
unsigned int nr_openat_calls = 111, i;
cpu_set_t cpu_set;
.disabled = 1,
.freq = 1,
};
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct thread_map *threads;
struct perf_mmap *md;
},
};
struct thread_map *threads = NULL;
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
struct perf_evlist *evlist = NULL;
struct perf_evsel *evsel, *cpu_clocks_evsel, *cycles_evsel;
struct perf_evsel *switch_evsel, *tracking_evsel;
};
const char *argv[] = { "true", NULL };
char sbuf[STRERR_BUFSIZE];
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct thread_map *threads;
struct perf_mmap *md;
return 0;
}
-static int check_cpu_topology(char *path, struct cpu_map *map)
+static int check_cpu_topology(char *path, struct perf_cpu_map *map)
{
struct perf_session *session;
struct perf_data data = {
int test__session_topology(struct test *test __maybe_unused, int subtest __maybe_unused)
{
char path[PATH_MAX];
- struct cpu_map *map;
+ struct perf_cpu_map *map;
int ret = TEST_FAIL;
TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
static int max_node_num;
static int *cpunode_map;
-static struct cpu_map *cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__default_new(void)
{
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
int nr_cpus;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
return cpus;
}
-static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
+static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
size_t payload_size = nr_cpus * sizeof(int);
- struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
+ struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
if (cpus != NULL) {
cpus->nr = nr_cpus;
return cpus;
}
-struct cpu_map *cpu_map__read(FILE *file)
+struct perf_cpu_map *cpu_map__read(FILE *file)
{
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
int nr_cpus = 0;
int *tmp_cpus = NULL, *tmp;
int max_entries = 0;
return cpus;
}
-static struct cpu_map *cpu_map__read_all_cpu_map(void)
+static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
FILE *onlnf;
onlnf = fopen("/sys/devices/system/cpu/online", "r");
return cpus;
}
-struct cpu_map *cpu_map__new(const char *cpu_list)
+struct perf_cpu_map *cpu_map__new(const char *cpu_list)
{
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
int i, nr_cpus = 0;
return cpus;
}
-static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
+static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
- struct cpu_map *map;
+ struct perf_cpu_map *map;
map = cpu_map__empty_new(cpus->nr);
if (map) {
return map;
}
-static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
+static struct perf_cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
{
- struct cpu_map *map;
+ struct perf_cpu_map *map;
int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
nr = bitmap_weight(mask->mask, nbits);
}
-struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
+struct perf_cpu_map *cpu_map__new_data(struct cpu_map_data *data)
{
if (data->type == PERF_CPU_MAP__CPUS)
return cpu_map__from_entries((struct cpu_map_entries *)data->data);
return cpu_map__from_mask((struct cpu_map_mask *)data->data);
}
-size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
+size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
char buf[BUFSIZE];
#undef BUFSIZE
}
-struct cpu_map *cpu_map__dummy_new(void)
+struct perf_cpu_map *cpu_map__dummy_new(void)
{
- struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
+ struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
if (cpus != NULL) {
cpus->nr = 1;
return cpus;
}
-struct cpu_map *cpu_map__empty_new(int nr)
+struct perf_cpu_map *cpu_map__empty_new(int nr)
{
- struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
+ struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);
if (cpus != NULL) {
int i;
return cpus;
}
-static void cpu_map__delete(struct cpu_map *map)
+static void cpu_map__delete(struct perf_cpu_map *map)
{
if (map) {
WARN_ONCE(refcount_read(&map->refcnt) != 0,
}
}
-struct cpu_map *cpu_map__get(struct cpu_map *map)
+struct perf_cpu_map *cpu_map__get(struct perf_cpu_map *map)
{
if (map)
refcount_inc(&map->refcnt);
return map;
}
-void cpu_map__put(struct cpu_map *map)
+void cpu_map__put(struct perf_cpu_map *map)
{
if (map && refcount_dec_and_test(&map->refcnt))
cpu_map__delete(map);
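/*
 * Editor's sketch (not part of this patch): perf_cpu_map is reference
 * counted, so each cpu_map__get() is paired with a cpu_map__put(); the
 * final put is what triggers cpu_map__delete() above. Hypothetical
 * example:
 */
static void cpu_map_ref_example(struct perf_cpu_map *map)
{
	struct perf_cpu_map *ref = cpu_map__get(map);	/* refcnt++ */

	/* ... use ref while holding the reference ... */
	cpu_map__put(ref);	/* refcnt--; the map is freed at zero */
}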
return ret ?: value;
}
-int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
+int cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
{
int cpu;
return *(int *)a - *(int *)b;
}
-int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
- int (*f)(struct cpu_map *map, int cpu, void *data),
+int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
+ int (*f)(struct perf_cpu_map *map, int cpu, void *data),
void *data)
{
- struct cpu_map *c;
+ struct perf_cpu_map *c;
int nr = cpus->nr;
int cpu, s1, s2;
return ret ?: value;
}
-int cpu_map__get_die(struct cpu_map *map, int idx, void *data)
+int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data)
{
int cpu, die_id, s;
return ret ?: value;
}
-int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
+int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data)
{
int cpu, s_die;
return (s_die << 16) | (cpu & 0xffff);
}
-int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
+int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp)
{
return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}
-int cpu_map__build_die_map(struct cpu_map *cpus, struct cpu_map **diep)
+int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep)
{
return cpu_map__build_map(cpus, diep, cpu_map__get_die, NULL);
}
-int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
+int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep)
{
return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}
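/*
 * Editor's sketch (not part of this patch): the build helpers above
 * derive an aggregate map from a CPU map. Hypothetical use, assuming
 * they return 0 on success as cpu_map__build_map() does:
 */
static void build_socket_map_example(struct perf_cpu_map *cpus)
{
	struct perf_cpu_map *sock = NULL;

	if (cpu_map__build_socket_map(cpus, &sock))
		return;		/* allocation failed */
	/* ... aggregate per-socket using sock->map[i] ... */
	cpu_map__put(sock);
}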
return 0;
}
-bool cpu_map__has(struct cpu_map *cpus, int cpu)
+bool cpu_map__has(struct perf_cpu_map *cpus, int cpu)
{
return cpu_map__idx(cpus, cpu) != -1;
}
-int cpu_map__idx(struct cpu_map *cpus, int cpu)
+int cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
{
int i;
return -1;
}
-int cpu_map__cpu(struct cpu_map *cpus, int idx)
+int cpu_map__cpu(struct perf_cpu_map *cpus, int idx)
{
return cpus->map[idx];
}
-size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
+size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
int i, cpu, start = -1;
bool first = true;
return '?';
}
-size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
+size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
int i, cpu;
char *ptr = buf;
return ptr - buf;
}
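/*
 * Editor's sketch (not part of this patch): formatting a map as text,
 * mirroring the cpu_map_print() test earlier in this series.
 */
static void snprint_example(struct perf_cpu_map *map)
{
	char buf[100];

	cpu_map__snprint(map, buf, sizeof(buf));	/* e.g. "0-4,7" */
	pr_debug("%s\n", buf);
}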
-const struct cpu_map *cpu_map__online(void) /* thread unsafe */
+const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
- static const struct cpu_map *online = NULL;
+ static const struct perf_cpu_map *online = NULL;
if (!online)
online = cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
#include "perf.h"
#include "util/debug.h"
-struct cpu_map {
+struct perf_cpu_map {
refcount_t refcnt;
int nr;
int map[];
};
-struct cpu_map *cpu_map__new(const char *cpu_list);
-struct cpu_map *cpu_map__empty_new(int nr);
-struct cpu_map *cpu_map__dummy_new(void);
-struct cpu_map *cpu_map__new_data(struct cpu_map_data *data);
-struct cpu_map *cpu_map__read(FILE *file);
-size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size);
-size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size);
-size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
+struct perf_cpu_map *cpu_map__new(const char *cpu_list);
+struct perf_cpu_map *cpu_map__empty_new(int nr);
+struct perf_cpu_map *cpu_map__dummy_new(void);
+struct perf_cpu_map *cpu_map__new_data(struct cpu_map_data *data);
+struct perf_cpu_map *cpu_map__read(FILE *file);
+size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size);
+size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size);
+size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp);
int cpu_map__get_socket_id(int cpu);
-int cpu_map__get_socket(struct cpu_map *map, int idx, void *data);
+int cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data);
int cpu_map__get_die_id(int cpu);
-int cpu_map__get_die(struct cpu_map *map, int idx, void *data);
+int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data);
int cpu_map__get_core_id(int cpu);
-int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
-int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
-int cpu_map__build_die_map(struct cpu_map *cpus, struct cpu_map **diep);
-int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
-const struct cpu_map *cpu_map__online(void); /* thread unsafe */
+int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data);
+int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp);
+int cpu_map__build_die_map(struct perf_cpu_map *cpus, struct perf_cpu_map **diep);
+int cpu_map__build_core_map(struct perf_cpu_map *cpus, struct perf_cpu_map **corep);
+const struct perf_cpu_map *cpu_map__online(void); /* thread unsafe */
-struct cpu_map *cpu_map__get(struct cpu_map *map);
-void cpu_map__put(struct cpu_map *map);
+struct perf_cpu_map *cpu_map__get(struct perf_cpu_map *map);
+void cpu_map__put(struct perf_cpu_map *map);
-static inline int cpu_map__socket(struct cpu_map *sock, int s)
+static inline int cpu_map__socket(struct perf_cpu_map *sock, int s)
{
if (!sock || s > sock->nr || s < 0)
return 0;
return id & 0xffff;
}
-static inline int cpu_map__nr(const struct cpu_map *map)
+static inline int cpu_map__nr(const struct perf_cpu_map *map)
{
return map ? map->nr : 1;
}
-static inline bool cpu_map__empty(const struct cpu_map *map)
+static inline bool cpu_map__empty(const struct perf_cpu_map *map)
{
return map ? map->map[0] == -1 : true;
}
int cpu__max_present_cpu(void);
int cpu__get_node(int cpu);
-int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
- int (*f)(struct cpu_map *map, int cpu, void *data),
+int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
+ int (*f)(struct perf_cpu_map *map, int cpu, void *data),
void *data);
-int cpu_map__cpu(struct cpu_map *cpus, int idx);
-bool cpu_map__has(struct cpu_map *cpus, int cpu);
-int cpu_map__idx(struct cpu_map *cpus, int cpu);
+int cpu_map__cpu(struct perf_cpu_map *cpus, int idx);
+bool cpu_map__has(struct perf_cpu_map *cpus, int cpu);
+int cpu_map__idx(struct perf_cpu_map *cpus, int cpu);
#endif /* __PERF_CPUMAP_H */
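/*
 * Editor's sketch (not part of this patch): typical lifecycle of the
 * renamed struct perf_cpu_map, using only the API declared above;
 * "0-3" is an arbitrary CPU list.
 */
static void cpu_map_lifecycle_example(void)
{
	struct perf_cpu_map *cpus = cpu_map__new("0-3");
	int idx;

	if (!cpus)
		return;
	for (idx = 0; idx < cpu_map__nr(cpus); idx++)
		pr_debug("cpu%d\n", cpu_map__cpu(cpus, idx));
	cpu_map__put(cpus);	/* drop the reference from cpu_map__new() */
}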
size_t sz;
long ncpus;
int ret = -1;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
bool has_die = has_die_topology();
ncpus = cpu__max_present_cpu();
struct numa_topology *numa_topology__new(void)
{
- struct cpu_map *node_map = NULL;
+ struct perf_cpu_map *node_map = NULL;
struct numa_topology *tp = NULL;
char path[MAXPATHLEN];
char *buf = NULL;
u32 node;
u64 mem_total;
u64 mem_free;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
};
struct memory_node {
}
static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct cpu_map *map)
+ struct perf_cpu_map *map)
{
int i;
}
static void synthesize_mask(struct cpu_map_mask *mask,
- struct cpu_map *map, int max)
+ struct perf_cpu_map *map, int max)
{
int i;
set_bit(map->map[i], mask->mask);
}
-static size_t cpus_size(struct cpu_map *map)
+static size_t cpus_size(struct perf_cpu_map *map)
{
return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}
-static size_t mask_size(struct cpu_map *map, int *max)
+static size_t mask_size(struct perf_cpu_map *map, int *max)
{
int i;
return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}
-void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
+void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
{
size_t size_cpus, size_mask;
bool is_dummy = cpu_map__empty(map);
return zalloc(*size);
}
-void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
+void cpu_map_data__synthesize(struct cpu_map_data *data, struct perf_cpu_map *map,
u16 type, int max)
{
data->type = type;
};
}
-static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
+static struct cpu_map_event* cpu_map_event__new(struct perf_cpu_map *map)
{
size_t size = sizeof(struct cpu_map_event);
struct cpu_map_event *event;
}
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct cpu_map *map,
+ struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
{
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
- struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
+ struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
size_t ret;
ret = fprintf(fp, ": ");
struct perf_tool;
struct thread_map;
-struct cpu_map;
+struct perf_cpu_map;
struct perf_stat_config;
struct perf_counts_values;
perf_event__handler_t process,
struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct cpu_map *cpus,
+ struct perf_cpu_map *cpus,
perf_event__handler_t process,
struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
int kallsyms__get_function_start(const char *kallsyms_filename,
const char *symbol_name, u64 *addr);
-void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
-void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
+void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max);
+void cpu_map_data__synthesize(struct cpu_map_data *data, struct perf_cpu_map *map,
u16 type, int max);
void event_attr_init(struct perf_event_attr *attr);
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
-void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+void perf_evlist__init(struct perf_evlist *evlist, struct perf_cpu_map *cpus,
struct thread_map *threads)
{
int i;
int comp_level)
{
struct perf_evsel *evsel;
- const struct cpu_map *cpus = evlist->cpus;
+ const struct perf_cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
/*
* Delay setting mp.prot: set it before calling perf_mmap__mmap.
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
bool all_threads = (target->per_thread && target->system_wide);
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct thread_map *threads;
/*
return -1;
}
-void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+void perf_evlist__set_maps(struct perf_evlist *evlist, struct perf_cpu_map *cpus,
struct thread_map *threads)
{
/*
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct thread_map *threads;
int err = -ENOMEM;
struct pollfd;
struct thread_map;
-struct cpu_map;
+struct perf_cpu_map;
struct record_opts;
#define PERF_EVLIST__HLIST_BITS 8
struct perf_mmap *mmap;
struct perf_mmap *overwrite_mmap;
struct thread_map *threads;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
-void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
+void perf_evlist__init(struct perf_evlist *evlist, struct perf_cpu_map *cpus,
struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);
void perf_evlist__set_selected(struct perf_evlist *evlist,
struct perf_evsel *evsel);
-void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+void perf_evlist__set_maps(struct perf_evlist *evlist, struct perf_cpu_map *cpus,
struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
return fd;
}
-int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct thread_map *threads)
{
int cpu, thread, nthreads;
return -EINVAL;
if (cpus == NULL) {
- static struct cpu_map *empty_cpu_map;
+ static struct perf_cpu_map *empty_cpu_map;
if (empty_cpu_map == NULL) {
empty_cpu_map = cpu_map__dummy_new();
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus)
+ struct perf_cpu_map *cpus)
{
return perf_evsel__open(evsel, cpus, NULL);
}
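/*
 * Editor's sketch (not part of this patch): opening an event on an
 * explicit CPU list via the helper above; "evsel" is assumed to be an
 * already configured event.
 */
static int open_per_cpu_example(struct perf_evsel *evsel)
{
	struct perf_cpu_map *cpus = cpu_map__new("0,1");
	int err = -ENOMEM;

	if (cpus) {
		err = perf_evsel__open_per_cpu(evsel, cpus);
		cpu_map__put(cpus);
	}
	return err;
}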
int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist)
{
- struct cpu_map *cpus = evsel->cpus;
+ struct perf_cpu_map *cpus = evsel->cpus;
struct thread_map *threads = evsel->threads;
if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr))
u64 db_id;
struct cgroup *cgrp;
void *handler;
- struct cpu_map *cpus;
- struct cpu_map *own_cpus;
+ struct perf_cpu_map *cpus;
+ struct perf_cpu_map *own_cpus;
struct thread_map *threads;
unsigned int sample_size;
int id_pos;
extern struct perf_missing_features perf_missing_features;
-struct cpu_map;
+struct perf_cpu_map;
struct target;
struct thread_map;
struct record_opts;
-static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+static inline struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
return evsel->cpus;
}
int perf_evsel__disable(struct perf_evsel *evsel);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus);
+ struct perf_cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
struct thread_map *threads);
-int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel);
struct event_update_event *ev = &event->event_update;
struct event_update_event_scale *ev_scale;
struct event_update_event_cpus *ev_cpus;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
size_t ret;
ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
struct event_update_event_cpus *ev_cpus;
struct perf_evlist *evlist;
struct perf_evsel *evsel;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
if (!pevlist || *pevlist == NULL)
return -EINVAL;
static void build_node_mask(int node, cpu_set_t *mask)
{
int c, cpu, nr_cpus;
- const struct cpu_map *cpu_map = NULL;
+ const struct perf_cpu_map *cpu_map = NULL;
cpu_map = cpu_map__online();
if (!cpu_map)
const char *cpu_list)
{
struct perf_evsel *evsel;
- struct cpu_map *cpus = pmu ? pmu->cpus :
+ struct perf_cpu_map *cpus = pmu ? pmu->cpus :
cpu_list ? cpu_map__new(cpu_list) : NULL;
event_attr_init(attr);
closedir(dir);
}
-static struct cpu_map *__pmu_cpumask(const char *path)
+static struct perf_cpu_map *__pmu_cpumask(const char *path)
{
FILE *file;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
file = fopen(path, "r");
if (!file)
#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
-static struct cpu_map *pmu_cpumask(const char *name)
+static struct perf_cpu_map *pmu_cpumask(const char *name)
{
char path[PATH_MAX];
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
const char *sysfs = sysfs__mountpoint();
const char *templates[] = {
CPUS_TEMPLATE_UNCORE,
static bool pmu_is_uncore(const char *name)
{
char path[PATH_MAX];
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
const char *sysfs = sysfs__mountpoint();
snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
bool is_uncore;
int max_precise;
struct perf_event_attr *default_config;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
struct list_head list; /* ELEM */
struct pyrf_cpu_map {
PyObject_HEAD
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
};
static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
PyObject *args, PyObject *kwargs)
{
struct perf_evsel *evsel = &pevsel->evsel;
- struct cpu_map *cpus = NULL;
+ struct perf_cpu_map *cpus = NULL;
struct thread_map *threads = NULL;
PyObject *pcpus = NULL, *pthreads = NULL;
int group = 0, inherit = 0;
PyObject *args, PyObject *kwargs __maybe_unused)
{
PyObject *pcpus = NULL, *pthreads = NULL;
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
struct thread_map *threads;
if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
static bool perf_probe_api(setup_probe_fn_t fn)
{
const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
int cpu, ret, i = 0;
cpus = cpu_map__new(NULL);
.config = PERF_COUNT_SW_CPU_CLOCK,
.exclude_kernel = 1,
};
- struct cpu_map *cpus;
+ struct perf_cpu_map *cpus;
int cpu, fd;
cpus = cpu_map__new(NULL);
evsel = perf_evlist__last(temp_evlist);
if (!evlist || cpu_map__empty(evlist->cpus)) {
- struct cpu_map *cpus = cpu_map__new(NULL);
+ struct perf_cpu_map *cpus = cpu_map__new(NULL);
cpu = cpus ? cpus->map[0] : 0;
cpu_map__put(cpus);
struct perf_evsel *counter, u64 tstamp)
{
struct thread_map *threads = counter->threads;
- struct cpu_map *cpus = counter->cpus;
+ struct perf_cpu_map *cpus = counter->cpus;
int cpu, thread;
if (config->aggr_mode == AGGR_GLOBAL) {
const char *cpu_list, unsigned long *cpu_bitmap)
{
int i, err = -1;
- struct cpu_map *map;
+ struct perf_cpu_map *map;
for (i = 0; i < PERF_TYPE_MAX; ++i) {
struct perf_evsel *evsel;
struct perf_counts_values *vals, int cpu, bool *skip)
{
unsigned long *mask = counter->per_pkg_mask;
- struct cpu_map *cpus = perf_evsel__cpus(counter);
+ struct perf_cpu_map *cpus = perf_evsel__cpus(counter);
int s;
*skip = false;
};
typedef int (*aggr_get_id_t)(struct perf_stat_config *config,
- struct cpu_map *m, int cpu);
+ struct perf_cpu_map *m, int cpu);
struct perf_stat_config {
enum aggr_mode aggr_mode;
const char *csv_sep;
struct stats *walltime_nsecs_stats;
struct rusage ru_data;
- struct cpu_map *aggr_map;
+ struct perf_cpu_map *aggr_map;
aggr_get_id_t aggr_get_id;
- struct cpu_map *cpus_aggr_map;
+ struct perf_cpu_map *cpus_aggr_map;
u64 *walltime_run;
struct rblist metric_events;
};
{
int i;
int ret = 0;
- struct cpu_map *m;
+ struct perf_cpu_map *m;
int c;
m = cpu_map__new(s);