drm/i915/gvt: only copy the first page for restore inhibit context
author		Zhao Yan <yan.y.zhao@intel.com>
		Wed, 1 Aug 2018 04:15:31 +0000 (00:15 -0400)
committer	Zhenyu Wang <zhenyuw@linux.intel.com>
		Thu, 2 Aug 2018 05:27:54 +0000 (13:27 +0800)
If a context is a restore-inhibit context, the GFX hardware only loads
the first page of the ring context, so we only need to copy that one
page from the guest as well.
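
For clarity, a small standalone sketch (not part of the patch) of the test
the new IS_RESTORE_INHIBIT() macro performs on the context's
CTX_CONTEXT_CONTROL value. It assumes the usual i915 conventions, namely
that _MASKED_BIT_ENABLE(x) expands to ((x) << 16) | (x) and that
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT is bit 0; the check is true only when
the guest set the bit through a masked write, i.e. both the mask half and
the value half are set:

/* Illustrative sketch only; MASKED_BIT_ENABLE mirrors i915's
 * _MASKED_BIT_ENABLE and the bit mirrors CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT. */
#include <stdint.h>
#include <stdio.h>

#define CTX_RESTORE_INHIBIT_BIT	(1u << 0)		/* value bit */
#define MASKED_BIT_ENABLE(x)	(((x) << 16) | (x))	/* mask half | value half */

/* True only when both the mask bit (bit 16) and the value bit (bit 0)
 * are set, i.e. the guest enabled restore inhibit via a masked write. */
static int is_restore_inhibit(uint32_t ctx_ctrl)
{
	return (ctx_ctrl & MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT_BIT)) ==
		MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT_BIT);
}

int main(void)
{
	printf("%d\n", is_restore_inhibit(0x00010001));	/* 1: mask + value set */
	printf("%d\n", is_restore_inhibit(0x00000001));	/* 0: value bit without mask bit */
	return 0;
}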

v3: use "return" instead of "goto" for inhibit case. (zhenyu wang)
v2: move judgement of restore inhibit to a macro in  mmio_context.h

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
Acked-by: Hang Yuan <hang.yuan@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
drivers/gpu/drm/i915/gvt/mmio_context.h
drivers/gpu/drm/i915/gvt/scheduler.c

index 5c3b9ff9f96aa979edebd277a8b000715a3a7d72..f7eaa442403f7177824ac8f10c47cf0de5c07d85 100644 (file)
@@ -53,5 +53,8 @@ bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
                                       struct i915_request *req);
+#define IS_RESTORE_INHIBIT(a)  \
+       (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
+       ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
 
 #endif
index 928818f218f7fb91091208fbb365c5a53846e395..049c0ea324faa4c4c1cf08147b76e9f1135b3a70 100644 (file)
@@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        unsigned long context_gpa, context_page_num;
        int i;
 
-       gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-                       workload->ctx_desc.lrca);
-
-       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
-       context_page_num = context_page_num >> PAGE_SHIFT;
-
-       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
-               context_page_num = 19;
-
-       i = 2;
-
-       while (i < context_page_num) {
-               context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-                               (u32)((workload->ctx_desc.lrca + i) <<
-                               I915_GTT_PAGE_SHIFT));
-               if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-                       gvt_vgpu_err("Invalid guest context descriptor\n");
-                       return -EFAULT;
-               }
-
-               page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
-               dst = kmap(page);
-               intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-                               I915_GTT_PAGE_SIZE);
-               kunmap(page);
-               i++;
-       }
-
        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        shadow_ring_context = kmap(page);
 
@@ -195,6 +166,37 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
        sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
        kunmap(page);
+
+       if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+               return 0;
+
+       gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+                       workload->ctx_desc.lrca);
+
+       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+       context_page_num = context_page_num >> PAGE_SHIFT;
+
+       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+               context_page_num = 19;
+
+       i = 2;
+       while (i < context_page_num) {
+               context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+                               (u32)((workload->ctx_desc.lrca + i) <<
+                               I915_GTT_PAGE_SHIFT));
+               if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+                       gvt_vgpu_err("Invalid guest context descriptor\n");
+                       return -EFAULT;
+               }
+
+               page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+               dst = kmap(page);
+               intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+                               I915_GTT_PAGE_SIZE);
+               kunmap(page);
+               i++;
+       }
        return 0;
 }