perf: Fix sibling iteration
author: Peter Zijlstra <peterz@infradead.org>
Thu, 15 Mar 2018 16:36:56 +0000 (17:36 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Fri, 16 Mar 2018 14:34:10 +0000 (15:34 +0100)
Mark noticed that the change to sibling_list changed some iteration
semantics; because previously we used group_entry as list entry,
sibling events would always have an empty sibling_list.

But because we now use sibling_list for both list head and list entry,
siblings will report as having siblings.

Fix this with a custom for_each_sibling_event() iterator.

Fixes: 8343aae66167 ("perf/core: Remove perf_event::group_entry")
Reported-by: Mark Rutland <mark.rutland@arm.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: vincent.weaver@maine.edu
Cc: alexander.shishkin@linux.intel.com
Cc: torvalds@linux-foundation.org
Cc: alexey.budankov@linux.intel.com
Cc: valery.cherepennikov@intel.com
Cc: eranian@google.com
Cc: acme@redhat.com
Cc: linux-tip-commits@vger.kernel.org
Cc: davidcc@google.com
Cc: kan.liang@intel.com
Cc: Dmitry.Prohorov@intel.com
Cc: jolsa@redhat.com
Link: https://lkml.kernel.org/r/20180315170129.GX4043@hirez.programming.kicks-ass.net
19 files changed:
arch/alpha/kernel/perf_event.c
arch/arm/mach-imx/mmdc.c
arch/arm/mm/cache-l2x0-pmu.c
arch/mips/kernel/perf_event_mipsxx.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/core-fsl-emb.c
arch/sparc/kernel/perf_event.c
arch/x86/events/core.c
arch/x86/events/intel/uncore.c
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/perf/arm_dsu_pmu.c
drivers/perf/arm_pmu.c
drivers/perf/hisilicon/hisi_uncore_pmu.c
drivers/perf/qcom_l2_pmu.c
drivers/perf/qcom_l3_pmu.c
drivers/perf/xgene_pmu.c
include/linux/perf_event.h
kernel/events/core.c

index 435864c24479876ababb616c958846a005227ad9..5613aa378a83bbb4b9ecdfd301d438c6560abad5 100644 (file)
@@ -351,7 +351,7 @@ static int collect_events(struct perf_event *group, int max_count,
                evtype[n] = group->hw.event_base;
                current_idx[n++] = PMC_NO_INDEX;
        }
-       list_for_each_entry(pe, &group->sibling_list, sibling_list) {
+       for_each_sibling_event(pe, group) {
                if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
index 27a9ca20933e697bf360657d4a74d7d2cf61b18d..04b3bf71de94ba7d24ae0561d2dfab62db189163 100644 (file)
@@ -269,7 +269,7 @@ static bool mmdc_pmu_group_is_valid(struct perf_event *event)
                        return false;
        }
 
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, leader) {
                if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
                        return false;
        }
index 3a89ea4c2b579c4e4a218ca03cbf39d156cd5680..afe5b4c7b164d89721d0fad3c69ff4245cd52dc0 100644 (file)
@@ -293,7 +293,7 @@ static bool l2x0_pmu_group_is_valid(struct perf_event *event)
        else if (!is_software_event(leader))
                return false;
 
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, leader) {
                if (sibling->pmu == pmu)
                        num_hw++;
                else if (!is_software_event(sibling))
index 46097ff3208b0188bf09799c1c7998b0c9d4dc33..ee73550f0b9a422c05d527287bb95aa4dd738180 100644 (file)
@@ -711,7 +711,7 @@ static int validate_group(struct perf_event *event)
        if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
                return -EINVAL;
 
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, leader) {
                if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
                        return -EINVAL;
        }
index 7c1f66050433dcf0e5e760ab231480634030a302..f8908ea4ea736487a719f52650f6bff380961e62 100644 (file)
@@ -1426,7 +1426,7 @@ static int collect_events(struct perf_event *group, int max_count,
                flags[n] = group->hw.event_base;
                events[n++] = group->hw.config;
        }
-       list_for_each_entry(event, &group->sibling_list, sibling_list) {
+       for_each_sibling_event(event, group) {
                if (event->pmu->task_ctx_nr == perf_hw_context &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
index 94c2e63662c6fbbcf0c8da64678a14e8348c6211..85f1d18e5fd3099c9d810eb5c5e6211bd144c959 100644 (file)
@@ -277,7 +277,7 @@ static int collect_events(struct perf_event *group, int max_count,
                ctrs[n] = group;
                n++;
        }
-       list_for_each_entry(event, &group->sibling_list, sibling_list) {
+       for_each_sibling_event(event, group) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
index a0a86d369119d8a169abc6d165a06385b4810ca1..d3149baaa33c6291e679add3bf1c05db5268a8e0 100644 (file)
@@ -1342,7 +1342,7 @@ static int collect_events(struct perf_event *group, int max_count,
                events[n] = group->hw.event_base;
                current_idx[n++] = PIC_NO_INDEX;
        }
-       list_for_each_entry(event, &group->sibling_list, sibling_list) {
+       for_each_sibling_event(event, group) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
index 77a4125b6b1f06898ffda40f8c2f9f35f0f1a32c..bfc8f43909c172a686f4897a653811bcbd583276 100644 (file)
@@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
        if (!dogrp)
                return n;
 
-       list_for_each_entry(event, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(event, leader) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;
index 9e374cd22ad21242c0b9f75443d97f2dc4c58b5e..a7956fc7ca1d873734a0bc4f1be6cd5d1949907c 100644 (file)
@@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
        if (!dogrp)
                return n;
 
-       list_for_each_entry(event, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(event, leader) {
                if (!is_box_event(box, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;
index c98435bdb64fa2e61655a9babba90305664120b5..c4c0c8560cce57339b23ac44efed9c196170bc76 100644 (file)
@@ -1311,7 +1311,7 @@ validate_group(struct perf_event *event)
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }
index 1c310a4be0003e9b830a950a4c6ff88e53900b20..65b7e4042ece17b1445e4bf6a79b444b809ce0da 100644 (file)
@@ -846,11 +846,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
                        !is_software_event(event->group_leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                       sibling_list)
+       for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
                        return -EINVAL;
+       }
 
        return 0;
 }
index 660680d78147c2d5dbf949775a5fa8be7a937a37..660cb8ac886ac9bb11319c5205f6dfd451264a8d 100644 (file)
@@ -536,7 +536,7 @@ static bool dsu_pmu_validate_group(struct perf_event *event)
        memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
        if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
                return false;
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, leader) {
                if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
                        return false;
        }
index 628d7a7b95260662b97a02b8d43f7c497a8eadbe..344e2083e94128fbc695594347b4740e3b470a2c 100644 (file)
@@ -311,7 +311,7 @@ validate_group(struct perf_event *event)
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }
index e3356087fd767d47c2bd2dee44f5f961290f1974..44df61397a380cbe94a179aa514b7663bdc7379f 100644 (file)
@@ -82,7 +82,7 @@ static bool hisi_validate_event_group(struct perf_event *event)
                        counters++;
        }
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, event->group_leader) {
                if (is_software_event(sibling))
                        continue;
                if (sibling->pmu != event->pmu)
index 5e535a71896562f7522adbf9ebb3f0372e5a3414..1a7889f63c9a64ec8de1e131b755878a259b0b16 100644 (file)
@@ -534,8 +534,7 @@ static int l2_cache_event_init(struct perf_event *event)
                return -EINVAL;
        }
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                           sibling_list)
+       for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                    !is_software_event(sibling)) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -571,8 +570,7 @@ static int l2_cache_event_init(struct perf_event *event)
                return -EINVAL;
        }
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                           sibling_list) {
+       for_each_sibling_event(sibling, event->group_leader) {
                if ((sibling != event) &&
                    !is_software_event(sibling) &&
                    (L2_EVT_GROUP(sibling->attr.config) ==
index 5dedf4b1a552de698e922cf92f580e7dc36d9d93..2dc63d61f2ea885991977bb3c830b0f9b2f6cc27 100644 (file)
@@ -468,7 +468,7 @@ static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
        counters = event_num_counters(event);
        counters += event_num_counters(leader);
 
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sibling, leader) {
                if (is_software_event(sibling))
                        continue;
                if (sibling->pmu != event->pmu)
index f1f4a56cab5e36fd6115dfd0915039b20a2336a6..6bdb1dad805f8198a6879aeab21a5ff9051b1510 100644 (file)
@@ -949,11 +949,11 @@ static int xgene_perf_event_init(struct perf_event *event)
                        !is_software_event(event->group_leader))
                return -EINVAL;
 
-       list_for_each_entry(sibling, &event->group_leader->sibling_list,
-                       sibling_list)
+       for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
                        return -EINVAL;
+       }
 
        return 0;
 }
index 2bb200e1bbea16476bb4a2c5ea7c2a6bce692d26..ff39ab011376854c5359790dc9d328617df9c175 100644 (file)
@@ -536,6 +536,10 @@ struct pmu_event_list {
        struct list_head        list;
 };
 
+#define for_each_sibling_event(sibling, event)                 \
+       if ((event)->group_leader == (event))                   \
+               list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
+
 /**
  * struct perf_event - performance event kernel representation:
  */
index 3b4c7792a6ac495f8e6665b978a32b57d59168aa..4d7a460d6669faf7c0003dccd862adb73f2772bb 100644 (file)
@@ -643,7 +643,7 @@ static void perf_event_update_sibling_time(struct perf_event *leader)
 {
        struct perf_event *sibling;
 
-       list_for_each_entry(sibling, &leader->sibling_list, sibling_list)
+       for_each_sibling_event(sibling, leader)
                perf_event_update_time(sibling);
 }
 
@@ -1828,7 +1828,7 @@ static void perf_group_attach(struct perf_event *event)
 
        perf_event__header_size(group_leader);
 
-       list_for_each_entry(pos, &group_leader->sibling_list, sibling_list)
+       for_each_sibling_event(pos, group_leader)
                perf_event__header_size(pos);
 }
 
@@ -1928,7 +1928,7 @@ static void perf_group_detach(struct perf_event *event)
 out:
        perf_event__header_size(event->group_leader);
 
-       list_for_each_entry(tmp, &event->group_leader->sibling_list, sibling_list)
+       for_each_sibling_event(tmp, event->group_leader)
                perf_event__header_size(tmp);
 }
 
@@ -1951,13 +1951,13 @@ static inline int __pmu_filter_match(struct perf_event *event)
  */
 static inline int pmu_filter_match(struct perf_event *event)
 {
-       struct perf_event *child;
+       struct perf_event *sibling;
 
        if (!__pmu_filter_match(event))
                return 0;
 
-       list_for_each_entry(child, &event->sibling_list, sibling_list) {
-               if (!__pmu_filter_match(child))
+       for_each_sibling_event(sibling, event) {
+               if (!__pmu_filter_match(sibling))
                        return 0;
        }
 
@@ -2031,7 +2031,7 @@ group_sched_out(struct perf_event *group_event,
        /*
         * Schedule out siblings (if any):
         */
-       list_for_each_entry(event, &group_event->sibling_list, sibling_list)
+       for_each_sibling_event(event, group_event)
                event_sched_out(event, cpuctx, ctx);
 
        perf_pmu_enable(ctx->pmu);
@@ -2310,7 +2310,7 @@ group_sched_in(struct perf_event *group_event,
        /*
         * Schedule in siblings as one group (if any):
         */
-       list_for_each_entry(event, &group_event->sibling_list, sibling_list) {
+       for_each_sibling_event(event, group_event) {
                if (event_sched_in(event, cpuctx, ctx)) {
                        partial_group = event;
                        goto group_error;
@@ -2326,7 +2326,7 @@ group_error:
         * partial group before returning:
         * The events up to the failed event are scheduled out normally.
         */
-       list_for_each_entry(event, &group_event->sibling_list, sibling_list) {
+       for_each_sibling_event(event, group_event) {
                if (event == partial_group)
                        break;
 
@@ -3863,7 +3863,7 @@ static void __perf_event_read(void *info)
 
        pmu->read(event);
 
-       list_for_each_entry(sub, &event->sibling_list, sibling_list) {
+       for_each_sibling_event(sub, event) {
                if (sub->state == PERF_EVENT_STATE_ACTIVE) {
                        /*
                         * Use sibling's PMU rather than @event's since
@@ -4711,7 +4711,7 @@ static int __perf_read_group_add(struct perf_event *leader,
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
 
-       list_for_each_entry(sub, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sub, leader) {
                values[n++] += perf_event_count(sub);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
@@ -4905,7 +4905,7 @@ static void perf_event_for_each(struct perf_event *event,
        event = event->group_leader;
 
        perf_event_for_each_child(event, func);
-       list_for_each_entry(sibling, &event->sibling_list, sibling_list)
+       for_each_sibling_event(sibling, event)
                perf_event_for_each_child(sibling, func);
 }
 
@@ -6077,7 +6077,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 
        __output_copy(handle, values, n * sizeof(u64));
 
-       list_for_each_entry(sub, &leader->sibling_list, sibling_list) {
+       for_each_sibling_event(sub, leader) {
                n = 0;
 
                if ((sub != event) &&
@@ -10662,8 +10662,7 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_remove_from_context(group_leader, 0);
                put_ctx(gctx);
 
-               list_for_each_entry(sibling, &group_leader->sibling_list,
-                                   sibling_list) {
+               for_each_sibling_event(sibling, group_leader) {
                        perf_remove_from_context(sibling, 0);
                        put_ctx(gctx);
                }
@@ -10684,8 +10683,7 @@ SYSCALL_DEFINE5(perf_event_open,
                 * By installing siblings first we NO-OP because they're not
                 * reachable through the group lists.
                 */
-               list_for_each_entry(sibling, &group_leader->sibling_list,
-                                   sibling_list) {
+               for_each_sibling_event(sibling, group_leader) {
                        perf_event__state_init(sibling);
                        perf_install_in_context(ctx, sibling, sibling->cpu);
                        get_ctx(ctx);
@@ -11324,7 +11322,7 @@ static int inherit_group(struct perf_event *parent_event,
         * case inherit_event() will create individual events, similar to what
         * perf_group_detach() would do anyway.
         */
-       list_for_each_entry(sub, &parent_event->sibling_list, sibling_list) {
+       for_each_sibling_event(sub, parent_event) {
                child_ctr = inherit_event(sub, parent, parent_ctx,
                                            child, leader, child_ctx);
                if (IS_ERR(child_ctr))