	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
+	zfree(&env->numa_map);
	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	return normalize_arch(arch_name);
}
+
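+/*
+ * Map a cpu id to its numa node id. The lookup table is built lazily
+ * on the first call; cpus missing from every node's cpu map resolve
+ * to node -1.
+ */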
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+	if (!env->nr_numa_map) {
+		struct numa_node *nn;
+		int i, nr = 0;
+
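+		/* Find the highest cpu id so the map covers every possible cpu. */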
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			nn = &env->numa_nodes[i];
+			nr = max(nr, perf_cpu_map__max(nn->map));
+		}
+
+		nr++;
+
+		/*
+		 * Initialize the numa_map array so that lookups for
+		 * cpus missing from every node map return node -1.
+		 */
+		env->numa_map = malloc(nr * sizeof(int));
+		if (!env->numa_map)
+			return -1;
+
+		for (i = 0; i < nr; i++)
+			env->numa_map[i] = -1;
+
+		env->nr_numa_map = nr;
+
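+		/* Record each cpu's node id; cpus absent from all maps stay -1. */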
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			int tmp, j;
+
+			nn = &env->numa_nodes[i];
+			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+				env->numa_map[j] = i;
+		}
+	}
+
+	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
		struct rb_root btfs;
		u32 btfs_cnt;
	} bpf_progs;
+
+	/* For fast cpu to numa node lookup via perf_env__numa_node */
+	int *numa_map;
+	int nr_numa_map;
};
enum perf_compress_type {
						__u32 prog_id);
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
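+/* Return the numa node for @cpu, or -1 if unknown. */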
+int perf_env__numa_node(struct perf_env *env, int cpu);
#endif /* __PERF_ENV_H */