perf/x86/intel: Extract memory code PEBS parser for reuse
author Andi Kleen <ak@linux.intel.com>
Tue, 2 Apr 2019 19:45:00 +0000 (12:45 -0700)
committer Ingo Molnar <mingo@kernel.org>
Tue, 16 Apr 2019 10:19:39 +0000 (12:19 +0200)
Extract the code related to memory profiling from the PEBS record
parser into separate functions, so that it can be reused by the upcoming
adaptive PEBS parser. No functional changes.

Rename intel_hsw_weight() to intel_get_tsx_weight() and
intel_hsw_transaction() to intel_get_tsx_transaction(), because the input
is no longer the HSW PEBS record format but the raw field values.
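
For illustration, a minimal sketch of how a parser for a different PEBS
record layout could feed raw field values into the extracted helpers. The
record type my_adaptive_record and the caller fill_adaptive_sample_data()
are hypothetical; only get_data_src(), intel_get_tsx_weight() and
intel_get_tsx_transaction() come from this patch, and the surrounding types
and flags are the usual perf ones from the kernel headers.

	/* Hypothetical record layout that carries the same raw fields. */
	struct my_adaptive_record {
		u64 dse;
		u64 tsx_tuning;
		u64 ax;
	};

	static void fill_adaptive_sample_data(struct perf_event *event,
					      struct my_adaptive_record *rec,
					      struct perf_sample_data *data)
	{
		u64 sample_type = event->attr.sample_type;
		bool fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;

		/* Data source decoding is shared via get_data_src(). */
		if (sample_type & PERF_SAMPLE_DATA_SRC)
			data->data_src.val = get_data_src(event, rec->dse);

		/*
		 * The original caller gates the TSX fields on
		 * pebs_format >= 2 and only sets the TSX weight when no
		 * memory weight (load latency) is in use.
		 */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_get_tsx_weight(rec->tsx_tuning);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(rec->tsx_tuning,
							      rec->ax);
	}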

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Cc: jolsa@kernel.org
Link: https://lkml.kernel.org/r/20190402194509.2832-4-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/events/intel/ds.c

index f57e6cb7fd993c873a0119d3481361e2ba04cff8..2f80b7e282ff21349e0d268757a092db155f5f50 100644
@@ -1125,34 +1125,50 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
        return 0;
 }
 
-static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
 {
-       if (pebs->tsx_tuning) {
-               union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
+       if (tsx_tuning) {
+               union hsw_tsx_tuning tsx = { .value = tsx_tuning };
                return tsx.cycles_last_block;
        }
        return 0;
 }
 
-static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
 {
-       u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
+       u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
 
        /* For RTM XABORTs also log the abort code from AX */
-       if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
-               txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
+       if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
+               txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
        return txn;
 }
 
+#define PERF_X86_EVENT_PEBS_HSW_PREC \
+               (PERF_X86_EVENT_PEBS_ST_HSW | \
+                PERF_X86_EVENT_PEBS_LD_HSW | \
+                PERF_X86_EVENT_PEBS_NA_HSW)
+
+static u64 get_data_src(struct perf_event *event, u64 aux)
+{
+       u64 val = PERF_MEM_NA;
+       int fl = event->hw.flags;
+       bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+
+       if (fl & PERF_X86_EVENT_PEBS_LDLAT)
+               val = load_latency_data(aux);
+       else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+               val = precise_datala_hsw(event, aux);
+       else if (fst)
+               val = precise_store_data(aux);
+       return val;
+}
+
 static void setup_pebs_sample_data(struct perf_event *event,
                                   struct pt_regs *iregs, void *__pebs,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
 {
-#define PERF_X86_EVENT_PEBS_HSW_PREC \
-               (PERF_X86_EVENT_PEBS_ST_HSW | \
-                PERF_X86_EVENT_PEBS_LD_HSW | \
-                PERF_X86_EVENT_PEBS_NA_HSW)
        /*
         * We cast to the biggest pebs_record but are careful not to
         * unconditionally access the 'extra' entries.
@@ -1160,17 +1176,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct pebs_record_skl *pebs = __pebs;
        u64 sample_type;
-       int fll, fst, dsrc;
-       int fl = event->hw.flags;
+       int fll;
 
        if (pebs == NULL)
                return;
 
        sample_type = event->attr.sample_type;
-       dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
-
-       fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
-       fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+       fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
 
        perf_sample_data_init(data, 0, event->hw.last_period);
 
@@ -1185,16 +1197,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
        /*
         * data.data_src encodes the data source
         */
-       if (dsrc) {
-               u64 val = PERF_MEM_NA;
-               if (fll)
-                       val = load_latency_data(pebs->dse);
-               else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
-                       val = precise_datala_hsw(event, pebs->dse);
-               else if (fst)
-                       val = precise_store_data(pebs->dse);
-               data->data_src.val = val;
-       }
+       if (sample_type & PERF_SAMPLE_DATA_SRC)
+               data->data_src.val = get_data_src(event, pebs->dse);
 
        /*
         * We must however always use iregs for the unwinder to stay sane; the
@@ -1281,10 +1285,11 @@ static void setup_pebs_sample_data(struct perf_event *event,
        if (x86_pmu.intel_cap.pebs_format >= 2) {
                /* Only set the TSX weight when no memory weight. */
                if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
-                       data->weight = intel_hsw_weight(pebs);
+                       data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
 
                if (sample_type & PERF_SAMPLE_TRANSACTION)
-                       data->txn = intel_hsw_transaction(pebs);
+                       data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
+                                                             pebs->ax);
        }
 
        /*