perf/smmuv3: Validate groups for global filtering
author	Robin Murphy <robin.murphy@arm.com>
Thu, 1 Aug 2019 15:22:45 +0000 (16:22 +0100)
committer	Will Deacon <will@kernel.org>
Tue, 27 Aug 2019 18:37:04 +0000 (19:37 +0100)
With global filtering, it becomes possible for users to construct
self-contradictory groups with conflicting filters. Make sure we
cover that when initially validating events.
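
For example, on an implementation with global filtering, a group whose
members disagree on their filter settings can never count all of its
events simultaneously, so event_init may as well reject it up front with
-EINVAL rather than letting the group fail only when it is scheduled.
A hypothetical invocation that would now fail at open time (the PMCG
instance name and stream IDs below are illustrative):

  perf stat -e '{smmuv3_pmcg_ff88840/transaction,filter_enable=1,filter_stream_id=0x42/,smmuv3_pmcg_ff88840/transaction,filter_enable=1,filter_stream_id=0x7f/}' -a sleep 1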

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
drivers/perf/arm_smmuv3_pmu.c

index c65c197b52a7aa30741e97375ce0981521210720..abcf54f7d19c94cc7031e1561b91c6eb42d91cb7 100644
@@ -113,8 +113,6 @@ struct smmu_pmu {
        u64 counter_mask;
        u32 options;
        bool global_filter;
-       u32 global_filter_span;
-       u32 global_filter_sid;
 };
 
 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
        smmu_pmu_set_smr(smmu_pmu, idx, sid);
 }
 
+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+                                        struct perf_event *new)
+{
+       if (get_filter_enable(new) != get_filter_enable(curr))
+               return false;
+
+       if (!get_filter_enable(new))
+               return true;
+
+       return get_filter_span(new) == get_filter_span(curr) &&
+              get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
                                       struct perf_event *event, int idx)
 {
@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
        }
 
        /* Requested settings same as current global settings */
-       if (span == smmu_pmu->global_filter_span &&
-           sid == smmu_pmu->global_filter_sid)
+       idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+       if (idx == num_ctrs ||
+           smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+               smmu_pmu_set_event_filter(event, 0, span, sid);
                return 0;
+       }
 
-       if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
-               return -EAGAIN;
-
-       smmu_pmu_set_event_filter(event, 0, span, sid);
-       smmu_pmu->global_filter_span = span;
-       smmu_pmu->global_filter_sid = sid;
-       return 0;
+       return -EAGAIN;
 }
 
 static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
        return idx;
 }
 
+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+                                      struct perf_event *new)
+{
+       if (new->pmu != curr->pmu)
+               return false;
+
+       if (to_smmu_pmu(new->pmu)->global_filter &&
+           !smmu_pmu_check_global_filter(curr, new))
+               return false;
+
+       return true;
+}
+
 /*
  * Implementation of abstract pmu functionality required by
  * the core perf events code.
@@ -349,7 +370,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
 
        /* Don't allow groups with mixed PMUs, except for s/w events */
        if (!is_software_event(event->group_leader)) {
-               if (event->group_leader->pmu != event->pmu)
+               if (!smmu_pmu_events_compatible(event->group_leader, event))
                        return -EINVAL;
 
                if (++group_num_events > smmu_pmu->num_counters)
@@ -360,7 +381,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
                if (is_software_event(sibling))
                        continue;
 
-               if (sibling->pmu != event->pmu)
+               if (!smmu_pmu_events_compatible(sibling, event))
                        return -EINVAL;
 
                if (++group_num_events > smmu_pmu->num_counters)