drm/i915/selftests: Check that whitelisted registers are accessible
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 1 Mar 2019 16:01:08 +0000 (16:01 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 1 Mar 2019 16:53:39 +0000 (16:53 +0000)
There is no point in whitelisting a register that the user then cannot
write to, so check the register exists before merging such patches.

v2: Mark SLICE_COMMON_ECO_CHICKEN1 [731c] as write-only
v3: Use different variables for different meanings!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dale B Stimson <dale.b.stimson@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190301140404.26690-6-chris@chris-wilson.co.uk
Link: https://patchwork.freedesktop.org/patch/msgid/20190301160108.19039-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/selftests/intel_workarounds.c

index e6ffc8ac22dc659567fd49afd8bd08b1f4c49707..9f12a0ec804b1dfd87e0213735bf58ee9bb85bb3 100644 (file)
 #include "igt_spinner.h"
 #include "igt_wedge_me.h"
 #include "mock_context.h"
+#include "mock_drm.h"
+
/*
 * Whitelisted registers that are write-only: a write lands but cannot be
 * read back, so the dirty-whitelist test must skip verifying them.
 * Currently only 0x731c (SLICE_COMMON_ECO_CHICKEN1) on Geminilake.
 */
static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};
 
 #define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
 struct wa_lists {
@@ -74,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        if (IS_ERR(result))
                return result;
 
-       i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+       i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
 
        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
@@ -331,6 +339,373 @@ out:
        return err;
 }
 
/*
 * Allocate, map and pin a single scratch page in the context's ppGTT.
 * The batch built by the caller stores register readbacks (via SRM)
 * into this page.
 *
 * The page is poisoned with 0xc5 so that any slot the GPU failed to
 * write remains recognisable when the results are inspected.
 *
 * Returns the pinned vma on success, or an ERR_PTR; the backing object
 * is released on failure.
 */
static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *ptr;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	/* Poison the backing store so unwritten result slots stand out. */
	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		goto err_obj;
	}
	memset(ptr, 0xc5, PAGE_SIZE);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	/*
	 * NOTE(review): if this domain change fails, the vma is left pinned
	 * when the object is put — presumably acceptable for a selftest
	 * teardown, but confirm against i915_vma_pin()'s contract.
	 */
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
+
/*
 * Allocate and pin a 16-page batch buffer in the context's ppGTT; the
 * caller fills it with the LRI/SRM command stream used to exercise a
 * whitelisted register.
 *
 * Returns the pinned vma on success, or an ERR_PTR; the backing object
 * is released on failure.
 */
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	/*
	 * NOTE(review): as in create_scratch(), a failure here leaves the
	 * vma pinned when the object is put — verify this is benign.
	 */
	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
+
/*
 * Model the effect of writing @new over @old through a register whose
 * writable bits were probed as @rsvd (the readback after writing ~0).
 *
 * A probe result of exactly 0x0000ffff identifies a masked register:
 * the high 16 bits of each written value select which of the low 16
 * bits are updated. Any other probe result is treated as a plain mask
 * of writable bits.
 */
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	u32 mask = (rsvd == 0x0000ffff) ? (new >> 16) : rsvd;

	return (old & ~mask) | (new & mask);
}
+
+static bool wo_register(struct intel_engine_cs *engine, u32 reg)
+{
+       enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
+               if (wo_registers[i].platform == platform &&
+                   wo_registers[i].reg == reg)
+                       return true;
+       }
+
+       return false;
+}
+
/*
 * For every register on @engine's whitelist, build a user batch that
 * writes a series of bit patterns to the register with LRI and stores
 * each readback into a scratch page with SRM. The results are then
 * compared against reg_write()'s model of which bits should stick.
 *
 * The final pattern written is ~0; its readback (results[ARRAY_SIZE])
 * reveals the register's writable bits ("rsvd") and is used to predict
 * every other readback. Registers listed in wo_registers[] are skipped
 * as they cannot be read back at all.
 *
 * Returns 0 on success, -EINVAL on mismatch, -EIO on hang/timeout, or
 * a negative errno from the GEM calls. Called with struct_mutex held
 * (i915_request_wait is passed I915_WAIT_LOCKED).
 */
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	/* Patterns chosen to toggle individual and grouped bits. */
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ctx);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;

		/* Write-only registers cannot be checked by readback. */
		if (wo_register(engine, reg))
			continue;

		/* Gen8+ uses the 64b-address forms of SRM/LRM (opcode + 1). */
		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original -- slot 0 records the pre-test value. */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		/* Second pass: the complement of each pattern. */
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_unpin_map(batch->obj);
		i915_gem_chipset_flush(ctx->i915);

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		/* The request must always be added once allocated. */
		i915_request_add(rq);
		if (err)
			goto out_batch;

		/* A stuck write is fatal: wedge the GPU and abort. */
		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timedout; cancelling test\n",
			       engine->name, reg);
			i915_gem_set_wedged(ctx->i915);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		/* The last value written was ~0: its readback = writable bits. */
		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
		if (!rsvd) {
			pr_err("%s: Unable to write to whitelisted register %x\n",
			       engine->name, reg);
			err = -EINVAL;
			goto out_unpin;
		}

		/* Replay the writes through reg_write() and compare. */
		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, values[v], rsvd);
			if (results[idx] != expect)
				err++; /* err counts mismatches here */
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			expect = reg_write(expect, ~values[v], rsvd);
			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
				engine->name, reg, results[0], rsvd);

			/* Dump the full write/read/expect trace for debugging. */
			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			/* Convert the mismatch count into an errno. */
			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}
+
/*
 * Selftest entry point: can the user write to the whitelisted registers
 * on every engine? Creates a mock client file and live context, then
 * runs check_dirty_whitelist() per engine that has a whitelist.
 *
 * Skipped on gen < 7, which lacks the LRI/SRM/LRM commands the test
 * relies on. Runs under struct_mutex (the selftest harness convention);
 * a runtime-pm wakeref is held for the duration.
 */
static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(i915);

	/*
	 * NOTE(review): struct_mutex is dropped around mock_file() —
	 * presumably because file creation takes locks that must not nest
	 * inside struct_mutex; confirm against mock_file()'s requirements.
	 */
	mutex_unlock(&i915->drm.struct_mutex);
	file = mock_file(i915);
	mutex_lock(&i915->drm.struct_mutex);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	/* Same lock dance in reverse for mock_file_free(). */
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	mutex_lock(&i915->drm.struct_mutex);
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
	return err;
}
+
 static int live_reset_whitelist(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -504,6 +879,7 @@ err:
 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
+               SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_gpu_reset_gt_engine_workarounds),
                SUBTEST(live_engine_reset_gt_engine_workarounds),