goto out_err;
}
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
comm1 = "Test COMM 1";
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));
goto out;
/* everything is good - enable the events and process */
- perf_evlist__enable(kvm->evlist);
+ evlist__enable(kvm->evlist);
while (!done) {
struct fdarray *fda = &kvm->evlist->pollfd;
* so don't spoil it by prematurely enabling them.
*/
if (!target__none(&opts->target) && !opts->initial_delay)
- perf_evlist__enable(rec->evlist);
+ evlist__enable(rec->evlist);
/*
* Let the child rip
if (opts->initial_delay) {
usleep(opts->initial_delay * USEC_PER_MSEC);
- perf_evlist__enable(rec->evlist);
+ evlist__enable(rec->evlist);
}
trigger_ready(&auxtrace_snapshot_trigger);
* - we have initial delay configured
*/
if (!target__none(&target) || stat_config.initial_delay)
- perf_evlist__enable(evsel_list);
+ evlist__enable(evsel_list);
}
static void disable_counters(void)
* so leave the check here.
*/
if (!target__none(&opts->target))
- perf_evlist__enable(top->evlist);
+ evlist__enable(top->evlist);
ret = -1;
if (pthread_create(&thread_process, NULL, process_thread, top)) {
goto out_error_mmap;
if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
if (forks)
perf_evlist__start_workload(evlist);
if (trace->opts.initial_delay) {
usleep(trace->opts.initial_delay * 1000);
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
}
trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
return TEST_FAIL;
}
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
testcase();
perf_evlist__disable(evlist);
goto out_delete_evlist;
}
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
(*func)();
perf_evlist__disable(evlist);
goto out_put;
}
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
do_something();
* enabled.
*/
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
comm = "Test COMM 1";
CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
* disabled with the dummy event still enabled.
*/
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
evsel = perf_evlist__last(evlist);
goto out_delete_evlist;
}
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
/*
* Generate the event:
* Now that all is properly set up, enable the events, they will
* count just on workload.pid, which will start...
*/
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
/*
* Now!
goto out_delete_evlist;
}
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
/* collect samples */
for (i = 0; i < NR_LOOPS; i++)
goto out_err;
}
- perf_evlist__enable(evlist);
+ evlist__enable(evlist);
err = evsel__disable(cpu_clocks_evsel);
if (err) {
evlist->enabled = false;
}
-void perf_evlist__enable(struct evlist *evlist)
+void evlist__enable(struct evlist *evlist)
{
struct evsel *pos;
void perf_evlist__toggle_enable(struct evlist *evlist)
{
- (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
+ (evlist->enabled ? perf_evlist__disable : evlist__enable)(evlist);
}
static int perf_evlist__enable_event_cpu(struct evlist *evlist,
size_t perf_evlist__mmap_size(unsigned long pages);
void perf_evlist__disable(struct evlist *evlist);
-void perf_evlist__enable(struct evlist *evlist);
+void evlist__enable(struct evlist *evlist);
void perf_evlist__toggle_enable(struct evlist *evlist);
int perf_evlist__enable_event_idx(struct evlist *evlist,
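
Note on the resulting calling convention (illustrative only, not part of the patch): the callers touched above all follow the same enable / run workload / disable pattern, now spelled with the shorter evlist__enable() name. A minimal sketch, assuming code living inside tools/perf with an evlist that has already been created, opened and mmapped, and using only helpers visible in this diff; run_sample_loop is a hypothetical name:

#include "util/evlist.h"	/* assumed include path inside tools/perf */

/* Hypothetical helper: count only while the forked workload runs. */
static void run_sample_loop(struct evlist *evlist)
{
	evlist__enable(evlist);			/* renamed helper: enable every evsel */
	perf_evlist__start_workload(evlist);	/* let the forked child rip */

	/* ... poll the mmaps and consume samples here ... */

	perf_evlist__disable(evlist);		/* stop counting before teardown */
}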