 	return -1;
 }
 
-int perf_mmap__push(struct perf_mmap *md, bool overwrite,
-		    void *to, int push(void *to, void *buf, size_t size))
+/*
+ * Report the start and end of the available data in the ring buffer.
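+ *
+ * Returns 1 when data is available, 0 when there is nothing to read,
+ * and -1 when a full backward ring buffer yields no readable range.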
+ */
+int perf_mmap__read_init(struct perf_mmap *md, bool overwrite,
+			 u64 *startp, u64 *endp)
 {
 	u64 head = perf_mmap__read_head(md);
 	u64 old = md->prev;
-	u64 end, start;
 	unsigned char *data = md->base + page_size;
 	unsigned long size;
-	void *buf;
-	int rc = 0;
 
-	start = overwrite ? head : old;
-	end = overwrite ? old : head;
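+	/*
+	 * A backward (overwrite) buffer is read from head to old;
+	 * a forward buffer is read from old to head.
+	 */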
+	*startp = overwrite ? head : old;
+	*endp = overwrite ? old : head;
- if (start == end)
+ if (*startp == *endp)
return 0;
- size = end - start;
+ size = *endp - *startp;
if (size > (unsigned long)(md->mask) + 1) {
if (!overwrite) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
 			md->prev = head;
 			perf_mmap__consume(md, overwrite);
 			return 0;
 		}
 
 		/*
 		 * Backward ring buffer is full. We still have a chance to read
 		 * most of the data from it.
 		 */
-		if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
+		if (overwrite_rb_find_range(data, md->mask, head, startp, endp))
 			return -1;
 	}
 
+	return 1;
+}
+
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
+		    void *to, int push(void *to, void *buf, size_t size))
+{
+	u64 head = perf_mmap__read_head(md);
+	u64 end, start;
+	unsigned char *data = md->base + page_size;
+	unsigned long size;
+	void *buf;
+	int rc = 0;
+
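+	/*
+	 * Ask perf_mmap__read_init() for the readable range; a return of
+	 * 0 (no data) or -1 (error) is passed straight back to the caller.
+	 */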
+	rc = perf_mmap__read_init(md, overwrite, &start, &end);
+	if (rc < 1)
+		return rc;
+
 	size = end - start;
 
 	if ((start & md->mask) + size != (end & md->mask)) {