return -1;
}
-int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+			  struct perf_evlist_mmap_ops *ops,
+			  struct perf_mmap_param *mp)
{
	struct perf_evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->cpus;
	const struct perf_thread_map *threads = evlist->threads;
-	struct perf_mmap_param mp;
+
+	if (!ops)
+		return -EINVAL;
	if (!evlist->mmap)
		evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
	if (!evlist->mmap)
		return -ENOMEM;
+	if (perf_cpu_map__empty(cpus))
+		return mmap_per_thread(evlist, mp);
+
+	return mmap_per_cpu(evlist, mp);
+}
+
+int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
+{
+	struct perf_mmap_param mp;
+	struct perf_evlist_mmap_ops ops;
+
	evlist->mmap_len = (pages + 1) * page_size;
	mp.mask = evlist->mmap_len - page_size - 1;
-	if (perf_cpu_map__empty(cpus))
-		return mmap_per_thread(evlist, &mp);
-
-	return mmap_per_cpu(evlist, &mp);
+	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
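For context, a minimal sketch of how a libperf user exercises the public entry point after this refactor: perf_evlist__mmap() now only fills in a default perf_mmap_param plus a (for now empty) perf_evlist_mmap_ops and delegates to perf_evlist__mmap_ops(). The sample_cycles() helper, the event choice, and the error handling below are illustrative assumptions, not part of this patch:

#include <linux/perf_event.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/evsel.h>

/* Illustrative helper (not part of the patch): mmap a cycles event on all CPUs. */
static int sample_cycles(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);	/* all online CPUs */
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel = perf_evsel__new(&attr);
	int err;

	/* Allocation error checks elided for brevity. */
	perf_evlist__add(evlist, evsel);
	perf_evlist__set_maps(evlist, cpus, NULL);

	err = perf_evlist__open(evlist);
	if (!err) {
		/*
		 * 4 data pages per ring buffer: with 4 KiB pages the wrapper
		 * sets mmap_len = (4 + 1) * 4096 and mp.mask = 4 * 4096 - 1
		 * before delegating to perf_evlist__mmap_ops().
		 */
		err = perf_evlist__mmap(evlist, 4);
		if (!err)
			perf_evlist__munmap(evlist);
		perf_evlist__close(evlist);
	}

	perf_evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	return err;
}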
struct perf_cpu_map;
struct perf_thread_map;
+struct perf_mmap_param;
struct perf_evlist {
struct list_head entries;
struct perf_mmap *mmap_ovw;
};
+struct perf_evlist_mmap_ops {
+};
+
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
void *ptr, short revent);
+int perf_evlist__mmap_ops(struct perf_evlist *evlist,
+			  struct perf_evlist_mmap_ops *ops,
+			  struct perf_mmap_param *mp);
+
/**
* __perf_evlist__for_each_entry - iterate thru all the evsels
* @list: list_head instance to iterate