drm/i915/perf: Assert locking for i915_oa_init_reg_state()
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 30 Aug 2019 18:19:29 +0000 (19:19 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Sat, 31 Aug 2019 15:08:28 +0000 (16:08 +0100)
We use the context->pin_mutex to serialise updates to the OA config and
the register values written into each new context. Document this
relationship and assert that we do hold the context->pin_mutex, as used by
gen8_configure_all_contexts() to serialise updates to the OA config
itself.

v2: Add a white-lie for when we call intel_gt_resume() from init.
v3: Lie while we have the context pinned inside atomic reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com> #v1
Link: https://patchwork.freedesktop.org/patch/msgid/20190830181929.18663-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/i915_perf.c

index 1363e069ec83099cac0d0af5bd32abc81610adb3..aa6cf0152ce74e8bcc31ffe5019b13013584536c 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "i915_drv.h"
 #include "i915_params.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -142,8 +143,12 @@ int intel_gt_resume(struct intel_gt *gt)
                intel_engine_pm_get(engine);
 
                ce = engine->kernel_context;
-               if (ce)
+               if (ce) {
+                       GEM_BUG_ON(!intel_context_is_pinned(ce));
+                       mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
                        ce->ops->reset(ce);
+                       mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
+               }
 
                engine->serial++; /* kernel context lost */
                err = engine->resume(engine);
index 0e28263c7b87c01bd5060a8c54779f9f331af8ea..87b7473a6dfb1aa68990107260f19bc6b93393d6 100644 (file)
@@ -2383,6 +2383,11 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
        ce = rq->hw_context;
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
        GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+       /* Proclaim we have exclusive access to the context image! */
+       GEM_BUG_ON(!intel_context_is_pinned(ce));
+       mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+
        rq = active_request(rq);
        if (!rq) {
                ce->ring->head = ce->ring->tail;
@@ -2442,6 +2447,7 @@ out_replay:
                  engine->name, ce->ring->head, ce->ring->tail);
        intel_ring_update_space(ce->ring);
        __execlists_update_reg_state(ce, engine);
+       mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 
 unwind:
        /* Push back any incomplete requests for replay after the reset. */
@@ -3920,6 +3926,9 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
                            u32 head,
                            bool scrub)
 {
+       GEM_BUG_ON(!intel_context_is_pinned(ce));
+       mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+
        /*
         * We want a simple context + ring to execute the breadcrumb update.
         * We cannot rely on the context being intact across the GPU hang,
@@ -3944,6 +3953,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
        intel_ring_update_space(ce->ring);
 
        __execlists_update_reg_state(ce, engine);
+       mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
index 2c9f46e126223f3ce05d703954a55f0e32c24925..c1b764233761515e429ffcfea8634b06ee88a9c0 100644 (file)
@@ -2305,6 +2305,9 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
 {
        struct i915_perf_stream *stream;
 
+       /* perf.exclusive_stream serialised by gen8_configure_all_contexts() */
+       lockdep_assert_held(&ce->pin_mutex);
+
        if (engine->class != RENDER_CLASS)
                return;