perf/x86/intel/uncore: Factor out box ref/unref functions
authorKan Liang <kan.liang@linux.intel.com>
Wed, 1 May 2019 00:53:45 +0000 (17:53 -0700)
committerIngo Molnar <mingo@kernel.org>
Mon, 17 Jun 2019 10:36:19 +0000 (12:36 +0200)
For uncore box which can only be accessed by MSR, its reference
box->refcnt is updated in CPU hot plug. The uncore boxes need to be
initialized and exited accordingly for the first/last CPU of a socket.

Starting from the Snow Ridge server, a new type of uncore box is
introduced, which can only be accessed by MMIO. The driver needs to
map/unmap MMIO space for the first/last CPU of a socket.

Extract the code for box ref/unref and init/exit for later reuse.

There is no functional change.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: https://lkml.kernel.org/r/1556672028-119221-4-git-send-email-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/events/intel/uncore.c

index 342c323e0f6ac20123e934418b4c2b892e235c86..7b0c88903d47f47d7740895f63d143b56f65d57c 100644 (file)
@@ -1143,12 +1143,27 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
                uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
 }
 
-static int uncore_event_cpu_offline(unsigned int cpu)
+static void uncore_box_unref(struct intel_uncore_type **types, int id)
 {
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
-       int i, die, target;
+       int i;
+
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[id];
+                       if (box && atomic_dec_return(&box->refcnt) == 0)
+                               uncore_box_exit(box);
+               }
+       }
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+       int die, target;
 
        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
@@ -1168,15 +1183,7 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 unref:
        /* Clear the references */
        die = topology_logical_die_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[die];
-                       if (box && atomic_dec_return(&box->refcnt) == 0)
-                               uncore_box_exit(box);
-               }
-       }
+       uncore_box_unref(uncore_msr_uncores, die);
        return 0;
 }
 
@@ -1219,15 +1226,15 @@ cleanup:
        return -ENOMEM;
 }
 
-static int uncore_event_cpu_online(unsigned int cpu)
+static int uncore_box_ref(struct intel_uncore_type **types,
+                         int id, unsigned int cpu)
 {
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
-       int i, ret, die, target;
+       int i, ret;
 
-       die = topology_logical_die_id(cpu);
-       ret = allocate_boxes(types, die, cpu);
+       ret = allocate_boxes(types, id, cpu);
        if (ret)
                return ret;
 
@@ -1235,11 +1242,22 @@ static int uncore_event_cpu_online(unsigned int cpu)
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[die];
+                       box = pmu->boxes[id];
                        if (box && atomic_inc_return(&box->refcnt) == 1)
                                uncore_box_init(box);
                }
        }
+       return 0;
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+       int ret, die, target;
+
+       die = topology_logical_die_id(cpu);
+       ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
+       if (ret)
+               return ret;
 
        /*
         * Check if there is an online cpu in the package