LIB_H += util/header.h
LIB_H += util/help.h
LIB_H += util/session.h
+LIB_H += util/ordered-events.h
LIB_H += util/strbuf.h
LIB_H += util/strlist.h
LIB_H += util/strfilter.h
LIB_OBJS += $(OUTPUT)util/map.o
LIB_OBJS += $(OUTPUT)util/pstack.o
LIB_OBJS += $(OUTPUT)util/session.o
+LIB_OBJS += $(OUTPUT)util/ordered-events.o
LIB_OBJS += $(OUTPUT)util/comm.o
LIB_OBJS += $(OUTPUT)util/thread.o
LIB_OBJS += $(OUTPUT)util/thread_map.o
--- /dev/null
+++ b/tools/perf/util/ordered-events.c
+#include <linux/list.h>
+#include "ordered-events.h"
+#include "evlist.h"
+#include "session.h"
+#include "asm/bug.h"
+#include "debug.h"
+
+/* The queue is ordered by time */
+static void queue_event(struct ordered_events *oe, struct ordered_event *new)
+{
+ struct ordered_event *last = oe->last;
+ u64 timestamp = new->timestamp;
+ struct list_head *p;
+
+ ++oe->nr_events;
+ oe->last = new;
+
+ if (!last) {
+ list_add(&new->list, &oe->events);
+ oe->max_timestamp = timestamp;
+ return;
+ }
+
+ /*
+ * last event might point to some random place in the list as it's
+ * the last queued event. We expect that the new event is close to
+ * this.
+ */
+ if (last->timestamp <= timestamp) {
+ while (last->timestamp <= timestamp) {
+ p = last->list.next;
+ if (p == &oe->events) {
+ list_add_tail(&new->list, &oe->events);
+ oe->max_timestamp = timestamp;
+ return;
+ }
+ last = list_entry(p, struct ordered_event, list);
+ }
+ list_add_tail(&new->list, &last->list);
+ } else {
+ while (last->timestamp > timestamp) {
+ p = last->list.prev;
+ if (p == &oe->events) {
+ list_add(&new->list, &oe->events);
+ return;
+ }
+ last = list_entry(p, struct ordered_event, list);
+ }
+ list_add(&new->list, &last->list);
+ }
+}
+
+#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
+static struct ordered_event *alloc_event(struct ordered_events *oe)
+{
+ struct list_head *cache = &oe->cache;
+ struct ordered_event *new = NULL;
+
+ if (!list_empty(cache)) {
+ new = list_entry(cache->next, struct ordered_event, list);
+ list_del(&new->list);
+ } else if (oe->buffer) {
+ new = oe->buffer + oe->buffer_idx;
+ if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
+ oe->buffer = NULL;
+ } else if (oe->cur_alloc_size < oe->max_alloc_size) {
+ size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
+
+ oe->buffer = malloc(size);
+ if (!oe->buffer)
+ return NULL;
+
+ oe->cur_alloc_size += size;
+ list_add(&oe->buffer->list, &oe->to_free);
+
+ /* First entry is abused to maintain the to_free list. */
+ oe->buffer_idx = 2;
+ new = oe->buffer + 1;
+ }
+
+ return new;
+}
+
+struct ordered_event *
+ordered_events__new(struct ordered_events *oe, u64 timestamp)
+{
+ struct ordered_event *new;
+
+ new = alloc_event(oe);
+ if (new) {
+ new->timestamp = timestamp;
+ queue_event(oe, new);
+ }
+
+ return new;
+}
+
+void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
+{
+ list_del(&event->list);
+ list_add(&event->list, &oe->cache);
+ oe->nr_events--;
+}
+
+static int __ordered_events__flush(struct perf_session *s,
+ struct perf_tool *tool)
+{
+ struct ordered_events *oe = &s->ordered_events;
+ struct list_head *head = &oe->events;
+ struct ordered_event *tmp, *iter;
+ struct perf_sample sample;
+ u64 limit = oe->next_flush;
+ u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
+ bool show_progress = limit == ULLONG_MAX;
+ struct ui_progress prog;
+ int ret;
+
+ if (!tool->ordered_events || !limit)
+ return 0;
+
+ if (show_progress)
+ ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
+
+ list_for_each_entry_safe(iter, tmp, head, list) {
+ if (session_done())
+ return 0;
+
+ if (iter->timestamp > limit)
+ break;
+
+ ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
+ if (ret)
+ pr_err("Can't parse sample, err = %d\n", ret);
+ else {
+ ret = perf_session__deliver_event(s, iter->event, &sample, tool,
+ iter->file_offset);
+ if (ret)
+ return ret;
+ }
+
+ ordered_events__delete(oe, iter);
+ oe->last_flush = iter->timestamp;
+
+ if (show_progress)
+ ui_progress__update(&prog, 1);
+ }
+
+ if (list_empty(head))
+ oe->last = NULL;
+ else if (last_ts <= limit)
+ oe->last = list_entry(head->prev, struct ordered_event, list);
+
+ return 0;
+}
+
+int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
+ enum oe_flush how)
+{
+ struct ordered_events *oe = &s->ordered_events;
+ int err;
+
+ switch (how) {
+ case OE_FLUSH__FINAL:
+ oe->next_flush = ULLONG_MAX;
+ break;
+
+ case OE_FLUSH__HALF:
+ {
+ struct ordered_event *first, *last;
+ struct list_head *head = &oe->events;
+
+ first = list_entry(head->next, struct ordered_event, list);
+ last = oe->last;
+
+ /* Warn if we are called before any event got allocated. */
+ if (WARN_ONCE(!last || list_empty(head), "empty queue"))
+ return 0;
+
+ oe->next_flush = first->timestamp;
+ oe->next_flush += (last->timestamp - first->timestamp) / 2;
+ break;
+ }
+
+ case OE_FLUSH__ROUND:
+ default:
+ break;
+ };
+
+ err = __ordered_events__flush(s, tool);
+
+ if (!err) {
+ if (how == OE_FLUSH__ROUND)
+ oe->next_flush = oe->max_timestamp;
+ }
+
+ return err;
+}
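For context, a minimal sketch of how a caller might queue a single event through the code above; the helper name queue_one and the flush-and-retry policy on allocation failure are illustrative, not part of this patch.

/*
 * Illustrative sketch only (not part of this patch).  Assumes "session.h",
 * "ordered-events.h" and <errno.h> are in scope and that the session embeds
 * a struct ordered_events, as the flush code above already relies on.
 */
static int queue_one(struct perf_session *s, struct perf_tool *tool,
		     union perf_event *event, u64 timestamp, u64 file_offset)
{
	struct ordered_events *oe = &s->ordered_events;
	struct ordered_event *new;

	new = ordered_events__new(oe, timestamp);
	if (!new) {
		/* Allocation limit reached: flush the older half and retry. */
		int err = ordered_events__flush(s, tool, OE_FLUSH__HALF);

		if (err)
			return err;
		new = ordered_events__new(oe, timestamp);
		if (!new)
			return -ENOMEM;
	}

	new->event = event;
	new->file_offset = file_offset;
	return 0;
}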
--- /dev/null
+++ b/tools/perf/util/ordered-events.h
+#ifndef __ORDERED_EVENTS_H
+#define __ORDERED_EVENTS_H
+
+#include <linux/types.h>
+#include "tool.h"
+
+struct perf_session;
+
+struct ordered_event {
+ u64 timestamp;
+ u64 file_offset;
+ union perf_event *event;
+ struct list_head list;
+};
+
+enum oe_flush {
+ OE_FLUSH__FINAL,
+ OE_FLUSH__ROUND,
+ OE_FLUSH__HALF,
+};
+
+struct ordered_events {
+ u64 last_flush;
+ u64 next_flush;
+ u64 max_timestamp;
+ u64 max_alloc_size;
+ u64 cur_alloc_size;
+ struct list_head events;
+ struct list_head cache;
+ struct list_head to_free;
+ struct ordered_event *buffer;
+ struct ordered_event *last;
+ int buffer_idx;
+ unsigned int nr_events;
+};
+
+struct ordered_event *ordered_events__new(struct ordered_events *oe, u64 timestamp);
+void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
+int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
+ enum oe_flush how);
+#endif /* __ORDERED_EVENTS_H */
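A sketch of how the flush modes declared above might be driven from the session layer; the function names are hypothetical, only the OE_FLUSH__* constants and ordered_events__flush() come from this patch.

/*
 * Illustrative sketch only (not part of this patch).  A ROUND flush would
 * typically be issued when a PERF_RECORD_FINISHED_ROUND pseudo event is
 * seen, and a FINAL flush once the whole file has been read.
 */
static int on_finished_round(struct perf_session *s, struct perf_tool *tool)
{
	/*
	 * Flush up to the limit set by the previous round; on success
	 * next_flush is advanced to the current max_timestamp.
	 */
	return ordered_events__flush(s, tool, OE_FLUSH__ROUND);
}

static int on_end_of_file(struct perf_session *s, struct perf_tool *tool)
{
	/* next_flush is forced to ULLONG_MAX, draining the whole queue. */
	return ordered_events__flush(s, tool, OE_FLUSH__FINAL);
}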
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
-#include "asm/bug.h"
static int perf_session__open(struct perf_session *session)
{
[PERF_RECORD_HEADER_MAX] = NULL,
};
-struct ordered_event {
- u64 timestamp;
- u64 file_offset;
- union perf_event *event;
- struct list_head list;
-};
-
-enum oe_flush {
- OE_FLUSH__FINAL,
- OE_FLUSH__ROUND,
- OE_FLUSH__HALF,
-};
-
static void perf_session_free_sample_buffers(struct perf_session *session)
{
struct ordered_events *oe = &session->ordered_events;
}
}
-/* The queue is ordered by time */
-static void queue_event(struct ordered_events *oe, struct ordered_event *new)
-{
- struct ordered_event *last = oe->last;
- u64 timestamp = new->timestamp;
- struct list_head *p;
-
- ++oe->nr_events;
- oe->last = new;
-
- if (!last) {
- list_add(&new->list, &oe->events);
- oe->max_timestamp = timestamp;
- return;
- }
-
- /*
- * last event might point to some random place in the list as it's
- * the last queued event. We expect that the new event is close to
- * this.
- */
- if (last->timestamp <= timestamp) {
- while (last->timestamp <= timestamp) {
- p = last->list.next;
- if (p == &oe->events) {
- list_add_tail(&new->list, &oe->events);
- oe->max_timestamp = timestamp;
- return;
- }
- last = list_entry(p, struct ordered_event, list);
- }
- list_add_tail(&new->list, &last->list);
- } else {
- while (last->timestamp > timestamp) {
- p = last->list.prev;
- if (p == &oe->events) {
- list_add(&new->list, &oe->events);
- return;
- }
- last = list_entry(p, struct ordered_event, list);
- }
- list_add(&new->list, &last->list);
- }
-}
-
-#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
-static struct ordered_event *alloc_event(struct ordered_events *oe)
-{
- struct list_head *cache = &oe->cache;
- struct ordered_event *new = NULL;
-
- if (!list_empty(cache)) {
- new = list_entry(cache->next, struct ordered_event, list);
- list_del(&new->list);
- } else if (oe->buffer) {
- new = oe->buffer + oe->buffer_idx;
- if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
- oe->buffer = NULL;
- } else if (oe->cur_alloc_size < oe->max_alloc_size) {
- size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
-
- oe->buffer = malloc(size);
- if (!oe->buffer)
- return NULL;
-
- oe->cur_alloc_size += size;
- list_add(&oe->buffer->list, &oe->to_free);
-
- /* First entry is abused to maintain the to_free list. */
- oe->buffer_idx = 2;
- new = oe->buffer + 1;
- }
-
- return new;
-}
-
-static struct ordered_event *
-ordered_events__new(struct ordered_events *oe, u64 timestamp)
-{
- struct ordered_event *new;
-
- new = alloc_event(oe);
- if (new) {
- new->timestamp = timestamp;
- queue_event(oe, new);
- }
-
- return new;
-}
-
-static void
-ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
-{
- list_del(&event->list);
- list_add(&event->list, &oe->cache);
- oe->nr_events--;
-}
-
-static int __ordered_events__flush(struct perf_session *s,
- struct perf_tool *tool)
-{
- struct ordered_events *oe = &s->ordered_events;
- struct list_head *head = &oe->events;
- struct ordered_event *tmp, *iter;
- struct perf_sample sample;
- u64 limit = oe->next_flush;
- u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
- bool show_progress = limit == ULLONG_MAX;
- struct ui_progress prog;
- int ret;
-
- if (!tool->ordered_events || !limit)
- return 0;
-
- if (show_progress)
- ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");
-
- list_for_each_entry_safe(iter, tmp, head, list) {
- if (session_done())
- return 0;
-
- if (iter->timestamp > limit)
- break;
-
- ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
- if (ret)
- pr_err("Can't parse sample, err = %d\n", ret);
- else {
- ret = perf_session__deliver_event(s, iter->event, &sample, tool,
- iter->file_offset);
- if (ret)
- return ret;
- }
-
- ordered_events__delete(oe, iter);
- oe->last_flush = iter->timestamp;
-
- if (show_progress)
- ui_progress__update(&prog, 1);
- }
-
- if (list_empty(head))
- oe->last = NULL;
- else if (last_ts <= limit)
- oe->last = list_entry(head->prev, struct ordered_event, list);
-
- return 0;
-}
-
-static int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
- enum oe_flush how)
-{
- struct ordered_events *oe = &s->ordered_events;
- int err;
-
- switch (how) {
- case OE_FLUSH__FINAL:
- oe->next_flush = ULLONG_MAX;
- break;
-
- case OE_FLUSH__HALF:
- {
- struct ordered_event *first, *last;
- struct list_head *head = &oe->events;
-
- first = list_entry(head->next, struct ordered_event, list);
- last = oe->last;
-
- /* Warn if we are called before any event got allocated. */
- if (WARN_ONCE(!last || list_empty(head), "empty queue"))
- return 0;
-
- oe->next_flush = first->timestamp;
- oe->next_flush += (last->timestamp - first->timestamp) / 2;
- break;
- }
-
- case OE_FLUSH__ROUND:
- default:
- break;
- };
-
- err = __ordered_events__flush(s, tool);
-
- if (!err) {
- if (how == OE_FLUSH__ROUND)
- oe->next_flush = oe->max_timestamp;
- }
-
- return err;
-}
-
/*
 * When perf record finishes a pass on every buffer, it records this pseudo
* event.
#include "symbol.h"
#include "thread.h"
#include "data.h"
+#include "ordered-events.h"
#include <linux/rbtree.h>
#include <linux/perf_event.h>
-struct ordered_event;
struct ip_callchain;
struct thread;
-struct ordered_events {
- u64 last_flush;
- u64 next_flush;
- u64 max_timestamp;
- u64 max_alloc_size;
- u64 cur_alloc_size;
- struct list_head events;
- struct list_head cache;
- struct list_head to_free;
- struct ordered_event *buffer;
- struct ordered_event *last;
- int buffer_idx;
- unsigned int nr_events;
-};
-
struct perf_session {
struct perf_header header;
struct machines machines;