#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+ int output, int cpu)
+{
+ /*
+ * The last reference will be dropped in perf_mmap__consume(), so we
+ * make sure we don't prevent tools from consuming every last event in
+ * the ring buffer.
+ *
+ * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+ * anymore, but the last events for it are still in the ring buffer,
+ * waiting to be consumed.
+ *
+ * Tools can choose to ignore this at their own discretion, but the
+ * evlist layer can't just drop it when filtering events in
+ * perf_evlist__filter_pollfd().
+ */
+ refcount_set(&map->refcnt, 2);
+
+ return perf_mmap__mmap(map, mp, output, cpu);
+}
+
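For context, here is a paraphrased sketch of the consume side that the comment above refers to; the real implementation lives in tools/lib/perf/mmap.c and may differ in detail, and consume_sketch is a hypothetical name. The second reference taken above is only dropped once the fd is gone and the ring buffer has been drained, which is why perf_evlist__filter_pollfd() can drop a POLLHUP'ed fd without losing events:

static void consume_sketch(struct perf_mmap *map)
{
	/* normal tail update for already-consumed events elided */

	/*
	 * Only the "consume" reference is left and no events remain:
	 * release it; the final perf_mmap__put() unmaps the ring buffer.
	 */
	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}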
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int idx, struct perf_mmap_param *mp, int cpu_idx,
if (*output == -1) {
*output = fd;
- if (perf_mmap__mmap(map, mp, *output, evlist_cpu) < 0)
+ if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
return -1;
} else {
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
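Routing the call through ops->mmap instead of invoking perf_mmap__mmap() directly is what lets a consumer layer its own mapping step on top of libperf's default; perf itself uses this hook to map its larger, wrapping struct mmap. A hypothetical callback could look like the sketch below (the name and the fprintf logging are illustrative only, assuming <stdio.h> is available):

static int trace_mmap_cb(struct perf_mmap *map, struct perf_mmap_param *mp,
			 int output, int cpu)
{
	/* illustrative only: log which ring buffer is being mapped */
	fprintf(stderr, "mmap ring: output fd %d, cpu %d\n", output, cpu);
	refcount_set(&map->refcnt, 2);	/* keep the default's lifetime rule */
	return perf_mmap__mmap(map, mp, output, cpu);
}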
const struct perf_cpu_map *cpus = evlist->cpus;
const struct perf_thread_map *threads = evlist->threads;
- if (!ops || !ops->get)
+ if (!ops || !ops->get || !ops->mmap)
return -EINVAL;
if (!evlist->mmap)
{
struct perf_mmap_param mp;
struct perf_evlist_mmap_ops ops = {
- .get = perf_evlist__mmap_cb_get,
+ .get  = perf_evlist__mmap_cb_get,
+ .mmap = perf_evlist__mmap_cb_mmap,
};
evlist->mmap_len = (pages + 1) * page_size;
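From the public API's point of view nothing changes: perf_evlist__mmap() still takes the number of data pages and fills in the default callbacks itself. A hypothetical caller, assuming an evlist that has already been created, configured and opened:

#include <perf/evlist.h>

static int setup_ring_buffers(struct perf_evlist *evlist)
{
	/*
	 * 4 data pages per ring buffer; as the code above shows, libperf
	 * adds one extra page for the control header, so mmap_len ends
	 * up as (4 + 1) * page_size.
	 */
	return perf_evlist__mmap(evlist, 4);
}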
typedef void
(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
typedef struct perf_mmap*
(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
+typedef int
+(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
struct perf_evlist_mmap_ops {
perf_evlist_mmap__cb_idx_t idx;
perf_evlist_mmap__cb_get_t get;
+ perf_evlist_mmap__cb_mmap_t mmap;
};
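Taken together, the internal ops table now carries three hooks, of which only get and mmap are mandatory (hence the -EINVAL check above; idx may stay NULL). A sketch of a fully populated table, where my_idx_cb and my_ops are hypothetical names and the other two callbacks are the defaults defined in evlist.c above:

static void my_idx_cb(struct perf_evlist *evlist, struct perf_mmap_param *mp,
		      int idx, bool per_cpu)
{
	/* e.g. stash per-index state before the maps are created */
}

static struct perf_evlist_mmap_ops my_ops = {
	.idx  = my_idx_cb,
	.get  = perf_evlist__mmap_cb_get,
	.mmap = perf_evlist__mmap_cb_mmap,
};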
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);