struct evsel *evsel;
struct evsel_runtime *er;
- list_for_each_entry(evsel, &evlist->entries, node) {
+ list_for_each_entry(evsel, &evlist->entries, core.node) {
er = perf_evsel__get_runtime(evsel);
if (er == NULL) {
pr_err("Failed to allocate memory for evsel runtime data\n");
continue;
}
- list_del_init(&evsel->node);
+ list_del_init(&evsel->core.node);
evsel->evlist = NULL;
evsel__delete(evsel);
}
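Every hunk in this series is the same mechanical substitution: the list_head that used to sit directly in struct evsel now lives inside the embedded libperf object, struct perf_evsel core, so each list helper takes the compound member path core.node instead of node. A minimal, self-contained sketch of why that keeps list_entry()/container_of()-style lookups working (the demo_* names and the local macro definitions are illustrative stand-ins, not perf code):

  #include <stddef.h>
  #include <stdio.h>

  struct list_head { struct list_head *next, *prev; };

  /* same idea as the kernel's container_of()/list_entry() */
  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))
  #define list_entry(ptr, type, member) container_of(ptr, type, member)

  struct demo_core {                      /* stands in for struct perf_evsel */
          struct list_head node;
  };

  struct demo_evsel {                     /* stands in for struct evsel */
          struct demo_core core;          /* the node now lives one level down */
          int idx;
  };

  int main(void)
  {
          struct demo_evsel ev = { .idx = 7 };
          struct list_head *pos = &ev.core.node;

          /* offsetof() accepts the compound designator "core.node", so the
           * containing demo_evsel is recovered exactly as "node" used to be */
          struct demo_evsel *back = list_entry(pos, struct demo_evsel, core.node);

          printf("idx = %d\n", back->idx);        /* prints 7 */
          return 0;
  }

The real macros in tools/include/linux/list.h expand the member argument the same way, which is why the only change needed at each call site is the third argument.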
{
struct evsel_menu *menu = container_of(browser,
struct evsel_menu, b);
- struct evsel *evsel = list_entry(entry, struct evsel, node);
+ struct evsel *evsel = list_entry(entry, struct evsel, core.node);
struct hists *hists = evsel__hists(evsel);
bool current_entry = ui_browser__is_current_entry(browser, row);
unsigned long nr_events = hists->stats.nr_events[PERF_RECORD_SAMPLE];
ui_browser__show_title(&menu->b, title);
switch (key) {
case K_TAB:
- if (pos->node.next == &evlist->entries)
+ if (pos->core.node.next == &evlist->entries)
pos = perf_evlist__first(evlist);
else
pos = perf_evsel__next(pos);
goto browse_hists;
case K_UNTAB:
- if (pos->node.prev == &evlist->entries)
+ if (pos->core.node.prev == &evlist->entries)
pos = perf_evlist__last(evlist);
else
pos = perf_evsel__prev(pos);
static bool filter_group_entries(struct ui_browser *browser __maybe_unused,
void *entry)
{
- struct evsel *evsel = list_entry(entry, struct evsel, node);
+ struct evsel *evsel = list_entry(entry, struct evsel, core.node);
if (symbol_conf.event_group && !perf_evsel__is_group_leader(evsel))
return true;
struct evsel *pos, *n;
evlist__for_each_entry_safe(evlist, n, pos) {
- list_del_init(&pos->node);
+ list_del_init(&pos->core.node);
pos->evlist = NULL;
evsel__delete(pos);
}
void evlist__add(struct evlist *evlist, struct evsel *entry)
{
entry->evlist = evlist;
- list_add_tail(&entry->node, &evlist->entries);
+ list_add_tail(&entry->core.node, &evlist->entries);
entry->idx = evlist->nr_entries;
entry->tracking = !entry->idx;
void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
evsel->evlist = NULL;
- list_del_init(&evsel->node);
+ list_del_init(&evsel->core.node);
evlist->nr_entries -= 1;
}
struct evsel *evsel, *temp;
__evlist__for_each_entry_safe(list, temp, evsel) {
- list_del_init(&evsel->node);
+ list_del_init(&evsel->core.node);
evlist__add(evlist, evsel);
}
}
{
struct evsel *evsel, *leader;
- leader = list_entry(list->next, struct evsel, node);
- evsel = list_entry(list->prev, struct evsel, node);
+ leader = list_entry(list->next, struct evsel, core.node);
+ evsel = list_entry(list->prev, struct evsel, core.node);
leader->nr_members = evsel->idx - leader->idx + 1;
evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
if (evsel == NULL)
goto out_delete_partial_list;
- list_add_tail(&evsel->node, &head);
+ list_add_tail(&evsel->core.node, &head);
}
perf_evlist__splice_list_tail(evlist, &head);
evlist__for_each_entry_safe(evlist, n, evsel) {
if (evsel->leader == move_evsel->leader)
- list_move_tail(&evsel->node, &move);
+ list_move_tail(&evsel->core.node, &move);
}
list_splice(&move, &evlist->entries);
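The new_idx/attrs hunk and the regroup hunk above share one idiom: collect evsels on a temporary list_head, then splice the whole chain onto evlist->entries in O(1). A self-contained sketch of that idiom with the nested core.node path (the s_* types and the simplified list helpers are illustrative, written in the spirit of tools/include/linux/list.h; typeof() is the usual GNU C extension):

  #include <stdio.h>
  #include <stddef.h>

  struct list_head { struct list_head *next, *prev; };

  #define LIST_HEAD_INIT(name)    { &(name), &(name) }
  #define list_entry(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))
  #define list_for_each_entry(pos, head, member)                          \
          for (pos = list_entry((head)->next, typeof(*pos), member);      \
               &pos->member != (head);                                    \
               pos = list_entry(pos->member.next, typeof(*pos), member))

  static void list_add_tail(struct list_head *node, struct list_head *head)
  {
          node->prev = head->prev;
          node->next = head;
          head->prev->next = node;
          head->prev = node;
  }

  /* move every entry of 'list' to the front of 'head' in one step */
  static void list_splice(struct list_head *list, struct list_head *head)
  {
          struct list_head *first = list->next, *last = list->prev;

          if (first == list)      /* source list is empty */
                  return;
          first->prev = head;
          last->next = head->next;
          head->next->prev = last;
          head->next = first;
  }

  struct s_core  { struct list_head node; };
  struct s_evsel { struct s_core core; const char *name; };

  int main(void)
  {
          struct list_head entries = LIST_HEAD_INIT(entries); /* like evlist->entries */
          struct list_head tmp     = LIST_HEAD_INIT(tmp);     /* like 'head' / 'move' */
          struct s_evsel a = { .name = "cycles" }, b = { .name = "instructions" };
          struct s_evsel *pos;

          list_add_tail(&a.core.node, &tmp);
          list_add_tail(&b.core.node, &tmp);

          list_splice(&tmp, &entries);    /* 'tmp' is left stale, as with the kernel helper */

          list_for_each_entry(pos, &entries, core.node)
                  printf("%s\n", pos->name);      /* cycles, instructions */

          return 0;
  }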
static inline struct evsel *perf_evlist__first(struct evlist *evlist)
{
- return list_entry(evlist->entries.next, struct evsel, node);
+ return list_entry(evlist->entries.next, struct evsel, core.node);
}
static inline struct evsel *perf_evlist__last(struct evlist *evlist)
{
- return list_entry(evlist->entries.prev, struct evsel, node);
+ return list_entry(evlist->entries.prev, struct evsel, core.node);
}
size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp);
* @evsel: struct evsel iterator
*/
#define __evlist__for_each_entry(list, evsel) \
- list_for_each_entry(evsel, list, node)
+ list_for_each_entry(evsel, list, core.node)
/**
* evlist__for_each_entry - iterate thru all the evsels
* @evsel: struct evsel iterator
*/
#define __evlist__for_each_entry_continue(list, evsel) \
- list_for_each_entry_continue(evsel, list, node)
+ list_for_each_entry_continue(evsel, list, core.node)
/**
* evlist__for_each_entry_continue - continue iteration thru all the evsels
* @evsel: struct evsel iterator
*/
#define __evlist__for_each_entry_reverse(list, evsel) \
- list_for_each_entry_reverse(evsel, list, node)
+ list_for_each_entry_reverse(evsel, list, core.node)
/**
* evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
* @evsel: struct evsel iterator
*/
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
- list_for_each_entry_safe(evsel, tmp, list, node)
+ list_for_each_entry_safe(evsel, tmp, list, core.node)
/**
* evlist__for_each_entry_safe - safely iterate thru all the evsels
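The _safe variants are what keep the purge-style hunks above correct: each evsel is unlinked and freed while the list is still being walked, so the iterator has to cache the next element before the current one disappears. A self-contained sketch of that pattern with the node nested under core (item/item_core are made-up names, the list helpers are simplified re-implementations in the spirit of tools/include/linux/list.h, and typeof() is GNU C):

  #include <stdio.h>
  #include <stdlib.h>
  #include <stddef.h>

  struct list_head { struct list_head *next, *prev; };

  #define LIST_HEAD_INIT(name)    { &(name), &(name) }
  #define list_entry(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  static void list_add_tail(struct list_head *node, struct list_head *head)
  {
          node->prev = head->prev;
          node->next = head;
          head->prev->next = node;
          head->prev = node;
  }

  static void list_del_init(struct list_head *entry)
  {
          entry->prev->next = entry->next;
          entry->next->prev = entry->prev;
          entry->next = entry->prev = entry;
  }

  /* 'tmp' caches the next element before the body can free 'pos' */
  #define list_for_each_entry_safe(pos, tmp, head, member)                        \
          for (pos = list_entry((head)->next, typeof(*pos), member),              \
               tmp = list_entry(pos->member.next, typeof(*pos), member);          \
               &pos->member != (head);                                            \
               pos = tmp, tmp = list_entry(tmp->member.next, typeof(*tmp), member))

  struct item_core { struct list_head node; };            /* like struct perf_evsel */
  struct item      { struct item_core core; int id; };    /* like struct evsel */

  int main(void)
  {
          struct list_head entries = LIST_HEAD_INIT(entries);
          struct item *it, *tmp;

          for (int i = 0; i < 3; i++) {
                  it = malloc(sizeof(*it));
                  if (!it)
                          break;
                  it->id = i;
                  list_add_tail(&it->core.node, &entries);
          }

          /* mirrors evlist__for_each_entry_safe() + list_del_init() + evsel__delete() */
          list_for_each_entry_safe(it, tmp, &entries, core.node) {
                  list_del_init(&it->core.node);
                  printf("freeing item %d\n", it->id);
                  free(it);
          }
          return 0;
  }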
evsel->evlist = NULL;
evsel->bpf_obj = NULL;
evsel->bpf_fd = -1;
- INIT_LIST_HEAD(&evsel->node);
+ INIT_LIST_HEAD(&evsel->core.node);
INIT_LIST_HEAD(&evsel->config_terms);
perf_evsel__object.init(evsel);
evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
void perf_evsel__exit(struct evsel *evsel)
{
- assert(list_empty(&evsel->node));
+ assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
perf_evsel__free_counts(evsel);
perf_evsel__free_fd(evsel);
#include <stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
+#include <internal/evsel.h>
#include "xyarray.h"
#include "symbol_conf.h"
#include "cpumap.h"
/** struct evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
- * @node - To insert it into evlist->entries or in other list_heads, say in
- * the event parsing routines.
+ * @core - libperf base object (struct perf_evsel); its node member links this
+ *         evsel into evlist->entries or into other list_heads, say in the
+ *         event parsing routines.
* @name - Can be set to retain the original event name passed by the user,
* so that when showing results in tools such as 'perf stat', we
* show the name used, not some alias.
* @priv: And what is in its containing unnamed union are tool specific
*/
struct evsel {
- struct list_head node;
+ struct perf_evsel core;
struct evlist *evlist;
struct perf_event_attr attr;
char *filter;
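For orientation, the layout this hunk relies on looks roughly as follows; this is a sketch, and the exact contents of the libperf struct are an assumption (the real struct perf_evsel comes from the new <internal/evsel.h> header and presumably grows more fields as the series progresses):

  #include <linux/perf_event.h>   /* uapi: struct perf_event_attr */

  struct list_head { struct list_head *next, *prev; };   /* as in tools/include/linux/list.h */

  /* libperf side (<internal/evsel.h>), assumed minimal shape */
  struct perf_evsel {
          struct list_head node;
  };

  /* perf side: embedding it as 'core' moves the list linkage to evsel->core.node,
   * so list_entry() callers name the member path "core.node" */
  struct evlist;
  struct evsel {
          struct perf_evsel       core;
          struct evlist           *evlist;
          struct perf_event_attr  attr;
          char                    *filter;
          /* remaining tool-side members unchanged */
  };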
static inline struct evsel *perf_evsel__next(struct evsel *evsel)
{
- return list_entry(evsel->node.next, struct evsel, node);
+ return list_entry(evsel->core.node.next, struct evsel, core.node);
}
static inline struct evsel *perf_evsel__prev(struct evsel *evsel)
{
- return list_entry(evsel->node.prev, struct evsel, node);
+ return list_entry(evsel->core.node.prev, struct evsel, core.node);
}
/**
/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader) \
-for ((_evsel) = list_entry((_leader)->node.next, struct evsel, node); \
+for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
(_evsel) && (_evsel)->leader == (_leader); \
- (_evsel) = list_entry((_evsel)->node.next, struct evsel, node))
+ (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader) \
for ((_evsel) = _leader; \
(_evsel) && (_evsel)->leader == (_leader); \
- (_evsel) = list_entry((_evsel)->node.next, struct evsel, node))
+ (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
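for_each_group_member() depends on two invariants that a rename like this has to preserve: group members sit immediately after their leader on evlist->entries, and each member's ->leader points back at that leader (a leader leads itself). A self-contained sketch of the same macro shape using the nested core.node path (the g_* types, local list helpers and event names are illustrative, not perf code):

  #include <stdio.h>
  #include <stddef.h>

  struct list_head { struct list_head *next, *prev; };

  #define list_entry(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  static void list_add_tail(struct list_head *node, struct list_head *head)
  {
          node->prev = head->prev;
          node->next = head;
          head->prev->next = node;
          head->prev = node;
  }

  struct g_core  { struct list_head node; };      /* stands in for struct perf_evsel */
  struct g_evsel {                                /* stands in for struct evsel */
          struct g_core    core;
          struct g_evsel  *leader;
          const char      *name;
  };

  /* same shape as perf's macro: start right after the leader, stop at the
   * first entry whose ->leader is someone else */
  #define for_each_group_member(_evsel, _leader)                                            \
          for ((_evsel) = list_entry((_leader)->core.node.next, struct g_evsel, core.node); \
               (_evsel) && (_evsel)->leader == (_leader);                                   \
               (_evsel) = list_entry((_evsel)->core.node.next, struct g_evsel, core.node))

  int main(void)
  {
          struct list_head entries = { &entries, &entries };
          struct g_evsel lead  = { .name = "cycles" };
          struct g_evsel m1    = { .name = "instructions", .leader = &lead };
          struct g_evsel m2    = { .name = "branches",     .leader = &lead };
          struct g_evsel other = { .name = "page-faults" };
          struct g_evsel *pos;

          lead.leader  = &lead;           /* a leader leads itself */
          other.leader = &other;

          list_add_tail(&lead.core.node,  &entries);
          list_add_tail(&m1.core.node,    &entries);
          list_add_tail(&m2.core.node,    &entries);
          list_add_tail(&other.core.node, &entries);

          for_each_group_member(pos, &lead)
                  printf("group member: %s\n", pos->name);  /* instructions, branches */

          return 0;
  }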
static inline bool perf_evsel__has_branch_callstack(const struct evsel *evsel)
{
if (config_terms)
list_splice(config_terms, &evsel->config_terms);
- list_add_tail(&evsel->node, list);
+ list_add_tail(&evsel->core.node, list);
return evsel;
}
list_splice(&config_terms, &evsel->config_terms);
}
- list_add_tail(&evsel->node, list);
+ list_add_tail(&evsel->core.node, list);
return 0;
}
pr_debug("Failed to add BPF event %s:%s\n",
group, event);
- list_for_each_entry_safe(evsel, tmp, &new_evsels, node) {
- list_del_init(&evsel->node);
+ list_for_each_entry_safe(evsel, tmp, &new_evsels, core.node) {
+ list_del_init(&evsel->core.node);
evsel__delete(evsel);
}
return err;
}
pr_debug("adding %s:%s\n", group, event);
- list_for_each_entry(pos, &new_evsels, node) {
+ list_for_each_entry(pos, &new_evsels, core.node) {
pr_debug("adding %s:%s to %p\n",
group, event, pos);
pos->bpf_fd = fd;
bool is_leader = true;
int i, nr_pmu = 0, total_members, ret = 0;
- leader = list_first_entry(list, struct evsel, node);
- evsel = list_last_entry(list, struct evsel, node);
+ leader = list_first_entry(list, struct evsel, core.node);
+ evsel = list_last_entry(list, struct evsel, core.node);
total_members = evsel->idx - leader->idx + 1;
leaders = calloc(total_members, sizeof(uintptr_t));
return;
__perf_evlist__set_leader(list);
- leader = list_entry(list->next, struct evsel, node);
+ leader = list_entry(list->next, struct evsel, core.node);
leader->group_name = name ? strdup(name) : NULL;
}
if (!last)
return 0;
- if (last->node.prev == &evlist->entries)
+ if (last->core.node.prev == &evlist->entries)
return 0;
- last = list_entry(last->node.prev, struct evsel, node);
+ last = list_entry(last->core.node.prev, struct evsel, core.node);
} while (!last->cmdline_group_boundary);
return 0;
struct evlist *evlist = counter->evlist;
struct evsel *alias;
- alias = list_prepare_entry(counter, &(evlist->entries), node);
- list_for_each_entry_continue (alias, &evlist->entries, node) {
+ alias = list_prepare_entry(counter, &(evlist->entries), core.node);
+ list_for_each_entry_continue (alias, &evlist->entries, core.node) {
if (strcmp(perf_evsel__name(alias), perf_evsel__name(counter)) ||
alias->scale != counter->scale ||
alias->cgrp != counter->cgrp ||
struct evsel *pos;
int nr_tracepoints = 0;
- list_for_each_entry(pos, pattrs, node) {
+ list_for_each_entry(pos, pattrs, core.node) {
if (pos->attr.type != PERF_TYPE_TRACEPOINT)
continue;
++nr_tracepoints;
{
struct evsel *pos;
- list_for_each_entry(pos, pattrs, node)
+ list_for_each_entry(pos, pattrs, core.node)
if (pos->attr.type == PERF_TYPE_TRACEPOINT)
return true;