Merge branch 'perf/urgent' into perf/core, to pick up fixes and resolve conflicts
authorIngo Molnar <mingo@kernel.org>
Mon, 5 Sep 2016 10:09:59 +0000 (12:09 +0200)
committerIngo Molnar <mingo@kernel.org>
Mon, 5 Sep 2016 10:09:59 +0000 (12:09 +0200)
 Conflicts:
kernel/events/core.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
1  2 
kernel/events/core.c

index 3f07e6cfc1b62b6ff76b5d3f8267483c6b966b15,07ac8596a72846c9e4fdc89ba02f797e2888424f..dff00c787867d5f7082fa9538ca8d28c63179542
@@@ -3573,15 -3549,18 +3573,23 @@@ static int perf_event_read(struct perf_
                        .group = group,
                        .ret = 0,
                };
-               ret = smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
-               /* The event must have been read from an online CPU: */
-               WARN_ON_ONCE(ret);
-               ret = ret ? : data.ret;
 +
 +              local_cpu = get_cpu();
 +              cpu_to_read = find_cpu_to_read(event, local_cpu);
 +              put_cpu();
 +
 -              (void)smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+               /*
+                * Purposely ignore the smp_call_function_single() return
+                * value.
+                *
+                * If event->oncpu isn't a valid CPU it means the event got
+                * scheduled out and that will have updated the event count.
+                *
+                * Therefore, either way, we'll have an up-to-date event count
+                * after this.
+                */
++              (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+               ret = data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
                unsigned long flags;