#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "parse-events.h"
comm2_time = sample.time;
}
next_event:
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
}
#include <semaphore.h>
#include <signal.h>
#include <math.h>
+#include <perf/mmap.h>
static const char *get_filename_for_perf_kvm(void)
{
while ((event = perf_mmap__read_event(md)) != NULL) {
err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
if (err) {
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
pr_err("Failed to parse sample\n");
return -1;
}
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
*/
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (err) {
pr_err("Failed to enqueue sample: %d\n", err);
#include <linux/err.h>
#include <linux/ctype.h>
+#include <perf/mmap.h>
static volatile int done;
static volatile int resize;
if (ret)
break;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (top->qe.rotate) {
pthread_mutex_lock(&top->qe.mutex);
#include <sys/sysmacros.h>
#include <linux/ctype.h>
+#include <perf/mmap.h>
#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
if (err)
goto out_disable;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (interrupted)
goto out_disable;
$(call do_install,include/perf/cpumap.h,$(prefix)/include/perf,644); \
$(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
$(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644);
- $(call do_install,include/perf/event.h,$(prefix)/include/perf,644);
+ $(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644); \
+ $(call do_install,include/perf/event.h,$(prefix)/include/perf,644); \
+ $(call do_install,include/perf/mmap.h,$(prefix)/include/perf,644);
install_pkgconfig: $(LIBPERF_PC)
$(call QUIET_INSTALL, $(LIBPERF_PC)) \
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
+u64 perf_mmap__read_head(struct perf_mmap *map);
+
#endif /* __LIBPERF_INTERNAL_MMAP_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LIBPERF_MMAP_H
+#define __LIBPERF_MMAP_H
+
+#include <perf/core.h>
+
+struct perf_mmap;
+
+LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
+
+#endif /* __LIBPERF_MMAP_H */
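For reference, a minimal sketch of a read loop as it looks after this change. The perf_mmap__read_init()/read_event()/read_done() helpers are assumed to still live in tools/perf and operate on struct mmap, while consuming now goes through the new libperf entry point on the embedded struct perf_mmap; the function name below (drain_one_mmap) is illustrative only:

	#include <perf/mmap.h>		/* perf_mmap__consume() */
	#include "util/mmap.h"		/* struct mmap and the tools/perf read helpers */

	/* Sketch: drain one ring-buffer map that the evlist has already mmap'ed. */
	static int drain_one_mmap(struct mmap *md)
	{
		union perf_event *event;

		if (perf_mmap__read_init(md) < 0)
			return 0;			/* ring is empty */

		while ((event = perf_mmap__read_event(md)) != NULL) {
			/* ... process the event ... */
			perf_mmap__consume(&md->core);	/* libperf call, takes struct perf_mmap * */
		}

		perf_mmap__read_done(md);
		return 0;
	}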
perf_evlist__next;
perf_evlist__set_maps;
perf_evlist__poll;
+ perf_mmap__consume;
local:
*;
};
// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
+#include <linux/ring_buffer.h>
+#include <linux/perf_event.h>
+#include <perf/mmap.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
if (refcount_dec_and_test(&map->refcnt))
perf_mmap__munmap(map);
}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+{
+ ring_buffer_write_tail(md->base, tail);
+}
+
+u64 perf_mmap__read_head(struct perf_mmap *map)
+{
+ return ring_buffer_read_head(map->base);
+}
+
+static bool perf_mmap__empty(struct perf_mmap *map)
+{
+ struct perf_event_mmap_page *pc = map->base;
+
+ return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
+}
+
+void perf_mmap__consume(struct perf_mmap *map)
+{
+ if (!map->overwrite) {
+ u64 old = map->prev;
+
+ perf_mmap__write_tail(map, old);
+ }
+
+ if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+ perf_mmap__put(map);
+}
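The consume path above relies on the data_head/data_tail handshake with the kernel: loading the head must also make visible every event byte the kernel published before it bumped data_head, and the tail may only be published once userspace has finished reading up to it (in overwrite mode the kernel ignores data_tail, which is why the tail write is skipped there). A rough sketch of what the ring_buffer_*() wrappers amount to, assuming the tools' smp_load_acquire()/smp_store_release() barriers; the real helpers in tools/include/linux/ring_buffer.h select per-architecture barriers:

	/* Illustrative only, not the actual implementation. */
	static inline u64 read_head_sketch(struct perf_event_mmap_page *pc)
	{
		/* acquire: order the head load before any read of event data */
		return smp_load_acquire(&pc->data_head);
	}

	static inline void write_tail_sketch(struct perf_event_mmap_page *pc, u64 tail)
	{
		/* release: hand ring space back to the kernel only after we
		 * are done reading everything up to the new tail */
		smp_store_release(&pc->data_tail, tail);
	}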
#include "util/mmap.h"
#include <errno.h>
#include <linux/string.h>
+#include <perf/mmap.h>
#define NR_ITERS 111
#include <linux/string.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
+#include <perf/mmap.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
#include <sys/param.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "dso.h"
while ((event = perf_mmap__read_event(md)) != NULL) {
ret = process_event(machine, evlist, event, state);
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (ret < 0)
return ret;
}
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "parse-events.h"
(pid_t)event->comm.tid == getpid() &&
strcmp(event->comm.comm, comm) == 0)
found += 1;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
}
#include <linux/kernel.h>
#include <linux/string.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
/*
* This test will generate random numbers of calls to some getpid syscalls,
goto out_delete_evlist;
}
nr_events[evsel->idx]++;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
#include "debug.h"
#include "util/mmap.h"
#include <errno.h>
+#include <perf/mmap.h>
#ifndef O_DIRECTORY
#define O_DIRECTORY 00200000
++nr_events;
if (type != PERF_RECORD_SAMPLE) {
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
continue;
}
#include <pthread.h>
#include <sched.h>
+#include <perf/mmap.h>
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
++errs;
}
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
}
#include "util/mmap.h"
#include "util/thread_map.h"
#include <perf/evlist.h>
+#include <perf/mmap.h>
#define NR_LOOPS 10000000
total_periods += sample.period;
nr_samples++;
next_event:
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
#include <linux/zalloc.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
#include "debug.h"
#include "parse-events.h"
while ((event = perf_mmap__read_event(md)) != NULL) {
cnt += 1;
ret = add_event(evlist, &events, event);
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (ret < 0)
goto out_free_nodes;
}
#include <linux/string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
+#include <perf/mmap.h>
static int exited;
static int nr_exit;
if (event->header.type == PERF_RECORD_EXIT)
nr_exit++;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
+#include <perf/mmap.h>
#include <internal/xyarray.h>
else
pr_warning("cannot locate proper evsel for the side band event\n");
- perf_mmap__consume(map);
+ perf_mmap__consume(&map->core);
got_data = true;
}
perf_mmap__read_done(map);
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
+#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
/* non-overwrite doesn't pause the ringbuffer */
if (!map->core.overwrite)
- map->core.end = perf_mmap__read_head(map);
+ map->core.end = perf_mmap__read_head(&map->core);
event = perf_mmap__read(map, &map->core.start, map->core.end);
return event;
}
-static bool perf_mmap__empty(struct mmap *map)
-{
- struct perf_event_mmap_page *pc = map->core.base;
-
- return perf_mmap__read_head(map) == map->core.prev && !pc->aux_size;
-}
-
-void perf_mmap__consume(struct mmap *map)
-{
- if (!map->core.overwrite) {
- u64 old = map->core.prev;
-
- perf_mmap__write_tail(map, old);
- }
-
- if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
- perf_mmap__put(&map->core);
-}
-
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
struct auxtrace_mmap_params *mp __maybe_unused,
void *userpg __maybe_unused,
*/
static int __perf_mmap__read_init(struct mmap *md)
{
- u64 head = perf_mmap__read_head(md);
+ u64 head = perf_mmap__read_head(&md->core);
u64 old = md->core.prev;
unsigned char *data = md->core.base + page_size;
unsigned long size;
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->core.prev = head;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
return -EAGAIN;
}
int perf_mmap__push(struct mmap *md, void *to,
int push(struct mmap *map, void *to, void *buf, size_t size))
{
- u64 head = perf_mmap__read_head(md);
+ u64 head = perf_mmap__read_head(&md->core);
unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
}
md->core.prev = head;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
out:
return rc;
}
if (!refcount_read(&map->core.refcnt))
return;
- map->core.prev = perf_mmap__read_head(map);
+ map->core.prev = perf_mmap__read_head(&map->core);
}
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu);
void mmap__munmap(struct mmap *map);
-void perf_mmap__consume(struct mmap *map);
-
-static inline u64 perf_mmap__read_head(struct mmap *mm)
-{
- return ring_buffer_read_head(mm->core.base);
-}
-
-static inline void perf_mmap__write_tail(struct mmap *md, u64 tail)
-{
- ring_buffer_write_tail(md->core.base, tail);
-}
-
union perf_event *perf_mmap__read_forward(struct mmap *map);
union perf_event *perf_mmap__read_event(struct mmap *map);
#include <linux/err.h>
#include <perf/cpumap.h>
#include <traceevent/event-parse.h>
+#include <perf/mmap.h>
#include "evlist.h"
#include "callchain.h"
#include "evsel.h"
err = perf_evsel__parse_sample(evsel, event, &pevent->sample);
/* Consume the event only after we parsed it out. */
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
if (err)
return PyErr_Format(PyExc_OSError,