drm/i915/gvt: Select appropriate mmio list at initialization time
author    Changbin Du <changbin.du@intel.com>
          Fri, 8 Dec 2017 06:56:21 +0000 (14:56 +0800)
committer Zhenyu Wang <zhenyuw@linux.intel.com>
          Fri, 8 Dec 2017 08:18:19 +0000 (16:18 +0800)
Select the appropriate mmio list at initialization time, so we don't need
to repeat the selection at every place that requires the mmio list.

V2:
  - Add a termination mark to the mmio list (walk pattern sketched below).

Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
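
As a quick illustration of the termination mark added in V2, here is a
minimal, self-contained userspace sketch of the sentinel-terminated list
walk. The struct, names and register values are simplified stand-ins for
illustration only, not the real i915/gvt definitions.

	#include <stdio.h>

	/* Simplified stand-in for struct engine_mmio; not the kernel definition. */
	struct mmio_entry {
		int ring_id;
		unsigned int reg;	/* offset 0 plays the role of the "Terminated" entry */
		unsigned int mask;
	};

	static const struct mmio_entry demo_list[] = {
		{ 0, 0x229c,  0xffff },
		{ 2, 0x22028, 0x0 },
		{ /* Terminated */ }
	};

	int main(void)
	{
		const struct mmio_entry *mmio;

		/* No ARRAY_SIZE bookkeeping: stop at the zero-offset terminator. */
		for (mmio = demo_list; mmio->reg; mmio++)
			printf("ring %d reg 0x%x mask 0x%x\n",
			       mmio->ring_id, mmio->reg, mmio->mask);
		return 0;
	}

With the terminator in place, consumers only need a pointer to the list,
which is what lets the per-generation selection move to init time.
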
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/render.c
drivers/gpu/drm/i915/gvt/render.h

index 9a5dce3aa10ab2bfe2aa884af4974517ebf9f5c0..643bb961d40dff4745614c640228a91bed0bfdbd 100644 (file)
@@ -386,6 +386,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
        if (ret)
                goto out_clean_idr;
 
+       intel_gvt_init_engine_mmio_context(gvt);
+
        ret = intel_gvt_load_firmware(gvt);
        if (ret)
                goto out_clean_mmio_info;
index 77df9bad5dea1d6a8dc76293e1ddd12a947b8cc8..39c2f3a4588e52a17cc395d0d4c6075feafcec05 100644 (file)
@@ -310,6 +310,8 @@ struct intel_gvt {
        wait_queue_head_t service_thread_wq;
        unsigned long service_request;
 
+       struct engine_mmio *engine_mmio_list;
+
        struct dentry *debugfs_root;
 };
 
index 43abca5dbe75210f46a6511f64df7fb1684faa4f..3e675f81815f1efcf9fb1b5c0c331e519e895d4b 100644 (file)
 #include "gvt.h"
 #include "trace.h"
 
-struct render_mmio {
-       int ring_id;
-       i915_reg_t reg;
-       u32 mask;
-       bool in_context;
-       u32 value;
-};
-
 /**
  * Defined in Intel Open Source PRM.
  * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
@@ -59,7 +51,7 @@ struct render_mmio {
 #define VF_GUARDBAND           _MMIO(0x83a4)
 
 /* Raw offset is appened to each line for convenience. */
-static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
+static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
        {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
        {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
        {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
@@ -88,9 +80,10 @@ static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
        {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
        {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
        {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+       { /* Terminated */ }
 };
 
-static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
+static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
        {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
        {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
@@ -153,6 +146,7 @@ static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
        {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+       { /* Terminated */ }
 };
 
 static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
@@ -282,21 +276,14 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
        i915_reg_t last_reg = _MMIO(0);
-       struct render_mmio *mmio;
+       struct engine_mmio *mmio;
        u32 v;
-       int i, array_size;
 
-       if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
-               mmio = gen9_render_mmio_list;
-               array_size = ARRAY_SIZE(gen9_render_mmio_list);
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                load_mocs(vgpu, ring_id);
-       } else {
-               mmio = gen8_render_mmio_list;
-               array_size = ARRAY_SIZE(gen8_render_mmio_list);
-       }
 
-       for (i = 0; i < array_size; i++, mmio++) {
+       mmio = vgpu->gvt->engine_mmio_list;
+       while (i915_mmio_reg_offset((mmio++)->reg)) {
                if (mmio->ring_id != ring_id)
                        continue;
 
@@ -326,7 +313,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
        }
 
        /* Make sure the swiched MMIOs has taken effect. */
-       if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+       if (likely(i915_mmio_reg_offset(last_reg)))
                I915_READ_FW(last_reg);
 
        handle_tlb_pending_event(vgpu, ring_id);
@@ -336,21 +323,15 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       struct render_mmio *mmio;
        i915_reg_t last_reg = _MMIO(0);
+       struct engine_mmio *mmio;
        u32 v;
-       int i, array_size;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-               mmio = gen9_render_mmio_list;
-               array_size = ARRAY_SIZE(gen9_render_mmio_list);
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                restore_mocs(vgpu, ring_id);
-       } else {
-               mmio = gen8_render_mmio_list;
-               array_size = ARRAY_SIZE(gen8_render_mmio_list);
-       }
 
-       for (i = 0; i < array_size; i++, mmio++) {
+       mmio = vgpu->gvt->engine_mmio_list;
+       while (i915_mmio_reg_offset((mmio++)->reg)) {
                if (mmio->ring_id != ring_id)
                        continue;
 
@@ -374,7 +355,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
        }
 
        /* Make sure the swiched MMIOs has taken effect. */
-       if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+       if (likely(i915_mmio_reg_offset(last_reg)))
                I915_READ_FW(last_reg);
 }
 
@@ -419,3 +400,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
+
+/**
+ * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list
+ * @gvt: GVT device
+ *
+ */
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
+{
+       if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+               gvt->engine_mmio_list = gen9_engine_mmio_list;
+       else
+               gvt->engine_mmio_list = gen8_engine_mmio_list;
+}
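
For context, a minimal sketch of the select-once-at-init idea that
intel_gvt_init_engine_mmio_context() implements. The is_gen9 flag, the
demo_* names and the list contents are illustrative placeholders standing
in for IS_SKYLAKE()/IS_KABYLAKE() and the real tables, not kernel API.

	#include <stdbool.h>

	/* Illustrative stand-ins, not the kernel's engine_mmio definitions. */
	struct mmio_entry {
		int ring_id;
		unsigned int reg;	/* 0 terminates the list */
	};

	static struct mmio_entry gen8_list[] = { { 0, 0x229c }, { /* Terminated */ } };
	static struct mmio_entry gen9_list[] = { { 0, 0x20e4 }, { /* Terminated */ } };

	struct demo_gvt {
		bool is_gen9;			/* placeholder for the platform checks */
		struct mmio_entry *engine_mmio_list;
	};

	/* Mirrors the shape of intel_gvt_init_engine_mmio_context(): decide once. */
	static void demo_init_engine_mmio_context(struct demo_gvt *gvt)
	{
		gvt->engine_mmio_list = gvt->is_gen9 ? gen9_list : gen8_list;
	}

	int main(void)
	{
		struct demo_gvt gvt = { .is_gen9 = true };

		demo_init_engine_mmio_context(&gvt);
		return gvt.engine_mmio_list == gen9_list ? 0 : 1;
	}

Any per-generation table wired in this way has to keep its trailing
{ /* Terminated */ } entry, since the switch paths now rely on the sentinel
rather than an explicit array size.
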
index 91db1d39d28f65e7ee42d17cbcc9e09cabe0ddf0..ca2c6a745673cf7d6d9be75f7ec4305686ed5ba7 100644 (file)
 #ifndef __GVT_RENDER_H__
 #define __GVT_RENDER_H__
 
+struct engine_mmio {
+       int ring_id;
+       i915_reg_t reg;
+       u32 mask;
+       bool in_context;
+       u32 value;
+};
+
 void intel_gvt_switch_mmio(struct intel_vgpu *pre,
                           struct intel_vgpu *next, int ring_id);
 
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
 
 #endif