perf stat: Move create_perf_stat_counter() to stat.c
author: Jiri Olsa <jolsa@kernel.org>
Thu, 30 Aug 2018 06:32:17 +0000 (08:32 +0200)
committer: Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 30 Aug 2018 18:52:22 +0000 (15:52 -0300)
Move create_perf_stat_counter() to the 'stat' class, so that we can use
it globally.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20180830063252.23729-9-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-stat.c
tools/perf/util/stat.c
tools/perf/util/stat.h

index 45bbd156d496c67c53296298981fe1e7970c4d5c..142cff8eb12b3a89d8da47021b92827f05711ead 100644 (file)
@@ -234,58 +234,6 @@ static void perf_stat__reset_stats(void)
                perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
 }
 
-static int create_perf_stat_counter(struct perf_evsel *evsel,
-                                   struct perf_stat_config *config)
-{
-       struct perf_event_attr *attr = &evsel->attr;
-       struct perf_evsel *leader = evsel->leader;
-
-       if (config->scale) {
-               attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
-                                   PERF_FORMAT_TOTAL_TIME_RUNNING;
-       }
-
-       /*
-        * The event is part of non trivial group, let's enable
-        * the group read (for leader) and ID retrieval for all
-        * members.
-        */
-       if (leader->nr_members > 1)
-               attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
-
-       attr->inherit = !config->no_inherit;
-
-       /*
-        * Some events get initialized with sample_(period/type) set,
-        * like tracepoints. Clear it up for counting.
-        */
-       attr->sample_period = 0;
-
-       if (config->identifier)
-               attr->sample_type = PERF_SAMPLE_IDENTIFIER;
-
-       /*
-        * Disabling all counters initially, they will be enabled
-        * either manually by us or by kernel via enable_on_exec
-        * set later.
-        */
-       if (perf_evsel__is_group_leader(evsel)) {
-               attr->disabled = 1;
-
-               /*
-                * In case of initial_delay we enable tracee
-                * events manually.
-                */
-               if (target__none(&target) && !config->initial_delay)
-                       attr->enable_on_exec = 1;
-       }
-
-       if (target__has_cpu(&target) && !target__has_per_thread(&target))
-               return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
-
-       return perf_evsel__open_per_thread(evsel, evsel->threads);
-}
-
 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
@@ -568,7 +516,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 
        evlist__for_each_entry(evsel_list, counter) {
 try_again:
-               if (create_perf_stat_counter(counter, &stat_config) < 0) {
+               if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {
 
                        /* Weak group failed. Reset the group. */
                        if ((errno == EINVAL || errno == EBADF) &&
index a0061e0b0fade70868ac6f9ffc90e4d9b0e7e990..3bd24255376a7c2648af04a7c4e7c889a95ca800 100644 (file)
@@ -435,3 +435,56 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
 
        return ret;
 }
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+                            struct perf_stat_config *config,
+                            struct target *target)
+{
+       struct perf_event_attr *attr = &evsel->attr;
+       struct perf_evsel *leader = evsel->leader;
+
+       if (config->scale) {
+               attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+                                   PERF_FORMAT_TOTAL_TIME_RUNNING;
+       }
+
+       /*
+        * The event is part of non trivial group, let's enable
+        * the group read (for leader) and ID retrieval for all
+        * members.
+        */
+       if (leader->nr_members > 1)
+               attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+       attr->inherit = !config->no_inherit;
+
+       /*
+        * Some events get initialized with sample_(period/type) set,
+        * like tracepoints. Clear it up for counting.
+        */
+       attr->sample_period = 0;
+
+       if (config->identifier)
+               attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+       /*
+        * Disabling all counters initially, they will be enabled
+        * either manually by us or by kernel via enable_on_exec
+        * set later.
+        */
+       if (perf_evsel__is_group_leader(evsel)) {
+               attr->disabled = 1;
+
+               /*
+                * In case of initial_delay we enable tracee
+                * events manually.
+                */
+               if (target__none(target) && !config->initial_delay)
+                       attr->enable_on_exec = 1;
+       }
+
+       if (target__has_cpu(target) && !target__has_per_thread(target))
+               return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+
+       return perf_evsel__open_per_thread(evsel, evsel->threads);
+}
index 918cde064cdc6f80f2a1986236157e14489b9cf1..8fb596641545b808c4e5bfa69f69024dd201096a 100644 (file)
@@ -175,4 +175,8 @@ int perf_event__process_stat_event(struct perf_tool *tool,
 size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+                            struct perf_stat_config *config,
+                            struct target *target);
 #endif