Move perf_mmap__put() from tools/perf to libperf.
Once perf_mmap__put() is moved, we need a way to invoke the
application-specific unmap code (the AIO and aux area teardown in
perf) when the map goes away.

Add the perf_mmap::unmap_cb callback to do that.
The unmap path from perf is:

  perf_mmap__put (libperf)
    perf_mmap__munmap (libperf)
      map->unmap_cb -> perf_mmap__unmap_cb (perf)
        mmap__munmap (perf)
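
For illustration, a rough sketch of the pattern from the application
side; the 'struct app_mmap' wrapper and its 'aio_state' field are
hypothetical, only the libperf calls are the ones this patch adds or
changes:

  #include <stdlib.h>
  #include <internal/mmap.h>
  #include <linux/kernel.h>	/* container_of() */

  struct app_mmap {
  	struct perf_mmap core;	/* generic libperf map */
  	void *aio_state;	/* application-private state */
  };

  /* Runs from perf_mmap__munmap() after the ring buffer is unmapped. */
  static void app_unmap_cb(struct perf_mmap *map)
  {
  	struct app_mmap *m = container_of(map, struct app_mmap, core);

  	free(m->aio_state);
  	m->aio_state = NULL;
  }

  ...
  perf_mmap__init(&m->core, false, app_unmap_cb);	/* refcnt == 0 */
  ...
  perf_mmap__put(&m->core);	/* last put: munmap, then app_unmap_cb() */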
Committer notes:
Add missing linux/kernel.h to tools/perf/lib/mmap.c to get the BUG_ON
definition.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-8-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
* every aio write request started in record__aio_push() so
* decrement it because the request is now complete.
*/
- perf_mmap__put(md);
+ perf_mmap__put(&md->core);
rc = 1;
} else {
/*
* map->refcount is decremented in record__aio_complete() after
* aio write operation finishes successfully.
*/
- perf_mmap__put(map);
+ perf_mmap__put(&map->core);
}
return ret;
/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+struct perf_mmap;
+
+typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
+
/**
* struct perf_mmap - perf's ring buffer mmap details
*
* @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
*/
struct perf_mmap {
- void *base;
- int mask;
- int fd;
- int cpu;
- refcount_t refcnt;
- u64 prev;
- u64 start;
- u64 end;
- bool overwrite;
- u64 flush;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+ void *base;
+ int mask;
+ int fd;
+ int cpu;
+ refcount_t refcnt;
+ u64 prev;
+ u64 start;
+ u64 end;
+ bool overwrite;
+ u64 flush;
+ libperf_unmap_cb_t unmap_cb;
+ char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
struct perf_mmap_param {
size_t perf_mmap__mmap_len(struct perf_mmap *map);
-void perf_mmap__init(struct perf_mmap *map, bool overwrite);
+void perf_mmap__init(struct perf_mmap *map, bool overwrite,
+ libperf_unmap_cb_t unmap_cb);
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
int fd, int cpu);
void perf_mmap__munmap(struct perf_mmap *map);
void perf_mmap__get(struct perf_mmap *map);
+void perf_mmap__put(struct perf_mmap *map);
#endif /* __LIBPERF_INTERNAL_MMAP_H */
#include <sys/mman.h>
#include <internal/mmap.h>
#include <internal/lib.h>
+#include <linux/kernel.h>
-void perf_mmap__init(struct perf_mmap *map, bool overwrite)
+void perf_mmap__init(struct perf_mmap *map, bool overwrite,
+ libperf_unmap_cb_t unmap_cb)
{
map->fd = -1;
map->overwrite = overwrite;
+ map->unmap_cb = unmap_cb;
refcount_set(&map->refcnt, 0);
}
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
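+	/* Let the application tear down its per-mmap private state. */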
+ if (map && map->unmap_cb)
+ map->unmap_cb(map);
}
void perf_mmap__get(struct perf_mmap *map)
{
refcount_inc(&map->refcnt);
}
+
+void perf_mmap__put(struct perf_mmap *map)
+{
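+	/* A ring buffer that is still mmap'ed must hold at least one reference. */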
+ BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+
+ if (refcount_dec_and_test(&map->refcnt))
+ perf_mmap__munmap(map);
+}
struct mmap *map = fda->priv[fd].ptr;
if (map)
- perf_mmap__put(map);
+ perf_mmap__put(&map->core);
}
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
if (evlist->mmap)
for (i = 0; i < evlist->core.nr_mmaps; i++)
- mmap__munmap(&evlist->mmap[i]);
+ perf_mmap__munmap(&evlist->mmap[i].core);
if (evlist->overwrite_mmap)
for (i = 0; i < evlist->core.nr_mmaps; i++)
- mmap__munmap(&evlist->overwrite_mmap[i]);
+ perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}
void evlist__munmap(struct evlist *evlist)
zfree(&evlist->overwrite_mmap);
}
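+/*
+ * Callback wired up via perf_mmap__init(); lets libperf hand control
+ * back to perf so it can release the parts of 'struct mmap' that
+ * libperf does not know about (e.g. AIO state).
+ */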
+static void perf_mmap__unmap_cb(struct perf_mmap *map)
+{
+ struct mmap *m = container_of(map, struct mmap, core);
+
+ mmap__munmap(m);
+}
+
static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
bool overwrite)
{
* Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
* thus does perf_mmap__get() on it.
*/
- perf_mmap__init(&map[i].core, overwrite);
+ perf_mmap__init(&map[i].core, overwrite, perf_mmap__unmap_cb);
}
return map;
*/
if (!evsel->core.system_wide &&
perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
- perf_mmap__put(&maps[idx]);
+ perf_mmap__put(&maps[idx].core);
return -1;
}
return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
}
-void perf_mmap__put(struct mmap *map)
-{
- BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
-
- if (refcount_dec_and_test(&map->core.refcnt))
- mmap__munmap(map);
-}
-
void perf_mmap__consume(struct mmap *map)
{
if (!map->core.overwrite) {
}
if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
- perf_mmap__put(map);
+ perf_mmap__put(&map->core);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
void mmap__munmap(struct mmap *map)
{
- perf_mmap__munmap(&map->core);
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
munmap(map->data, mmap__mmap_len(map));
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
void mmap__munmap(struct mmap *map);
-void perf_mmap__put(struct mmap *map);
-
void perf_mmap__consume(struct mmap *map);
static inline u64 perf_mmap__read_head(struct mmap *mm)