drm/i915/gvt: Checking workload's gma earlier
author Xiong Zhang <xiong.y.zhang@intel.com>
Mon, 27 May 2019 05:45:53 +0000 (13:45 +0800)
committer Zhenyu Wang <zhenyuw@linux.intel.com>
Tue, 30 Jul 2019 06:29:48 +0000 (14:29 +0800)
A workload contains an RB and a WA_CTX, both of which live in
ggtt space. If they aren't in a valid ggtt range, the workload
shouldn't be shadowed or scanned, so check them earlier to avoid
shadowing them.

Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
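
As context for the change below, here is a minimal standalone sketch of
the check-before-shadow pattern this patch applies. The ggtt_space type,
the ggtt_range_valid() helper, and the sample numbers are hypothetical
stand-ins for intel_gvt_ggtt_validate_range() and the vGPU's real GGTT
bookkeeping; only the ordering (validate the guest ring buffer range
before doing any shadowing work) mirrors the patch.

/*
 * Minimal userspace sketch, NOT driver code: ggtt_range_valid() and
 * struct ggtt_space are hypothetical stand-ins for
 * intel_gvt_ggtt_validate_range() and the vGPU's GGTT state.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ggtt_space {		/* stand-in for the guest's valid GGTT window */
	uint64_t base;
	uint64_t size;
};

/* A range is valid only if it lies entirely inside the guest's window. */
static bool ggtt_range_valid(const struct ggtt_space *ggtt,
			     uint64_t gma, uint64_t len)
{
	return len && len <= ggtt->size &&
	       gma >= ggtt->base &&
	       gma - ggtt->base <= ggtt->size - len;
}

int main(void)
{
	struct ggtt_space ggtt = { .base = 0x100000, .size = 0x400000 };
	uint64_t rb_start = 0x180000, rb_size = 0x2000;	/* sample values */

	/* Reject the workload at creation time, before any shadow setup,
	 * as the patch now does in intel_vgpu_create_workload(). */
	if (!ggtt_range_valid(&ggtt, rb_start, rb_size)) {
		fprintf(stderr, "context contains invalid rb at: 0x%lx\n",
			(unsigned long)rb_start);
		return 1;
	}
	printf("rb range ok, safe to shadow and scan\n");
	return 0;
}

Moving the check to workload creation means an invalid RB is rejected
once, up front, instead of being discovered only after shadow setup has
started in scan_workload()/scan_wa_ctx().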
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/scheduler.c

drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6ea88270c818ff62680e56f95cfef8a3c3c1f1f3..b09dc315e2dab7674275482b8908b3c902962e85 100644
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
                gma_head == gma_tail)
                return 0;
 
-       if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        ret = ip_gma_set(&s, gma_head);
        if (ret)
                goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        s.workload = workload;
        s.is_ctx_wa = true;
 
-       if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        ret = ip_gma_set(&s, gma_head);
        if (ret)
                goto out;
drivers/gpu/drm/i915/gvt/scheduler.c
index 2144fb46d0e1ce5f45c125cf3286694e78ff8708..6469366c1753b75d86ef45ec037977269048705d 100644
@@ -1492,6 +1492,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
 
+       if (!intel_gvt_ggtt_validate_range(vgpu, start,
+                               _RING_CTL_BUF_SIZE(ctl))) {
+               gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+               return ERR_PTR(-EINVAL);
+       }
+
        workload = alloc_workload(vgpu);
        if (IS_ERR(workload))
                return workload;
@@ -1516,9 +1522,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                workload->wa_ctx.indirect_ctx.size =
                        (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                        CACHELINE_BYTES;
+
+               if (workload->wa_ctx.indirect_ctx.size != 0) {
+                       if (!intel_gvt_ggtt_validate_range(vgpu,
+                               workload->wa_ctx.indirect_ctx.guest_gma,
+                               workload->wa_ctx.indirect_ctx.size)) {
+                               kmem_cache_free(s->workloads, workload);
+                               gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+                                   workload->wa_ctx.indirect_ctx.guest_gma);
+                               return ERR_PTR(-EINVAL);
+                       }
+               }
+
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
                workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+               if (workload->wa_ctx.per_ctx.valid) {
+                       if (!intel_gvt_ggtt_validate_range(vgpu,
+                               workload->wa_ctx.per_ctx.guest_gma,
+                               CACHELINE_BYTES)) {
+                               kmem_cache_free(s->workloads, workload);
+                               gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+                                       workload->wa_ctx.per_ctx.guest_gma);
+                               return ERR_PTR(-EINVAL);
+                       }
+               }
        }
 
        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",