 #undef nsyscalls
 }
 
-static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
-					  size_t *sizep)
+static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
 {
-	cpu_set_t *mask;
-	size_t size;
 	int i, cpu = -1, nrcpus = 1024;
 realloc:
-	mask = CPU_ALLOC(nrcpus);
-	size = CPU_ALLOC_SIZE(nrcpus);
-	CPU_ZERO_S(size, mask);
+	CPU_ZERO(maskp);
 
-	if (sched_getaffinity(pid, size, mask) == -1) {
-		CPU_FREE(mask);
+	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
 		if (errno == EINVAL && nrcpus < (1024 << 8)) {
 			nrcpus = nrcpus << 2;
 			goto realloc;
 		}
 	for (i = 0; i < nrcpus; i++) {
-		if (CPU_ISSET_S(i, size, mask)) {
-			if (cpu == -1) {
+		if (CPU_ISSET(i, maskp)) {
+			if (cpu == -1)
 				cpu = i;
-				*maskp = mask;
-				*sizep = size;
-			} else
-				CPU_CLR_S(i, size, mask);
+			else
+				CPU_CLR(i, maskp);
 		}
 	}
-	if (cpu == -1)
-		CPU_FREE(mask);
-
 	return cpu;
 }
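
For reference, a minimal standalone sketch of the fixed-size cpu_set_t pattern the hunk above switches to. It is illustrative only (first_possible_cpu() and main() are my names, not part of the patch); it compiles on its own against glibc and shows the plain CPU_ZERO()/CPU_ISSET()/CPU_CLR() calls replacing the CPU_*_S() dynamic-mask variants:

/* Illustrative sketch only, not the patched perf code. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>

/* Return the first CPU in @pid's affinity mask, reducing *maskp to just it. */
static int first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1;

	CPU_ZERO(maskp);
	if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1)
		return -1;

	for (i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, maskp)) {
			if (cpu == -1)
				cpu = i;		/* remember the first set bit */
			else
				CPU_CLR(i, maskp);	/* clear every other CPU */
		}
	}
	return cpu;
}

int main(void)
{
	cpu_set_t mask;
	int cpu = first_possible_cpu(getpid(), &mask);

	if (cpu < 0) {
		fprintf(stderr, "sched_getaffinity: %s\n", strerror(errno));
		return 1;
	}

	/* Pin ourselves to that single CPU, mirroring what the test does. */
	if (sched_setaffinity(getpid(), sizeof(mask), &mask) < 0) {
		fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));
		return 1;
	}

	printf("pinned to CPU %d\n", cpu);
	return 0;
}

Note one deliberate simplification in the sketch: with a fixed-size mask the scan can simply run to CPU_SETSIZE, since retrying sched_getaffinity() with a larger buffer (the nrcpus/realloc dance above) only pays off with the dynamically sized CPU_ALLOC() API.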
 		.freq = 10,
 		.mmap_pages = 256,
 	};
-	cpu_set_t *cpu_mask = NULL;
-	size_t cpu_mask_size = 0;
+	cpu_set_t cpu_mask;
+	size_t cpu_mask_size = sizeof(cpu_mask);
 	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 	struct perf_evsel *evsel;
 	struct perf_sample sample;
 	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
 	perf_evlist__config_attrs(evlist, &opts);
-	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
-					    &cpu_mask_size);
+	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
 	if (err < 0) {
 		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
 		goto out_delete_evlist;
 	/*
 	 * So that we can check perf_sample.cpu on all the samples.
 	 */
-	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
+	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
 		pr_debug("sched_setaffinity: %s\n", strerror(errno));
-		goto out_free_cpu_mask;
+		goto out_delete_evlist;
 	}
 	/*
 	}
 out_err:
 	perf_evlist__munmap(evlist);
-out_free_cpu_mask:
-	CPU_FREE(cpu_mask);
 out_delete_evlist:
 	perf_evlist__delete(evlist);
 out:
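
One trade-off worth noting (my observation, not something the patch states): a plain cpu_set_t holds at most CPU_SETSIZE (1024) CPUs, which is exactly the limit the removed CPU_ALLOC()/CPU_ALLOC_SIZE() path was lifting by retrying with up to 1024 << 8 bits. A tiny, purely illustrative check of that limit against the running machine:

/* Illustrative only: compare the fixed cpu_set_t capacity to this machine. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long configured = sysconf(_SC_NPROCESSORS_CONF);

	printf("cpu_set_t holds up to %d CPUs\n", CPU_SETSIZE);
	printf("this system has %ld configured CPUs\n", configured);

	/* The dynamic CPU_ALLOC() API is only needed past CPU_SETSIZE. */
	if (configured > CPU_SETSIZE)
		printf("would need CPU_ALLOC() here\n");
	return 0;
}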