 	atomic_t running_workload_num;
 	struct i915_gem_context *shadow_ctx;
 	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 };
 struct intel_vgpu {
 	/* 1/2K for each reserve ring buffer */
 	void *reserve_ring_buffer_va[I915_NUM_ENGINES];
 	int reserve_ring_buffer_size[I915_NUM_ENGINES];
-	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	default:
 		return -EINVAL;
 	}
-	set_bit(id, (void *)vgpu->tlb_handle_pending);
+	set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
 	return 0;
 }
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	enum forcewake_domains fw;
 	i915_reg_t reg;
 	u32 regs[] = {
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
-	if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
 		return;
 	reg = _MMIO(regs[ring_id]);
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
 	atomic_set(&s->running_workload_num, 0);
+	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 	return 0;
 	vgpu->handle = param->handle;
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
-	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
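
For reference, a minimal sketch (not part of the patch) of how a caller reaches the bitmap once it has moved into struct intel_vgpu_submission; mark_tlb_flush_pending() is a hypothetical helper name used only for illustration:

/*
 * Illustration only, assuming the layout introduced above: the bitmap is
 * now reached through vgpu->submission instead of struct intel_vgpu.
 * mark_tlb_flush_pending() is a made-up helper, not part of this patch.
 */
static void mark_tlb_flush_pending(struct intel_vgpu *vgpu, int ring_id)
{
	struct intel_vgpu_submission *s = &vgpu->submission;

	/* Mirrors the access pattern used in handle_tlb_pending_event(). */
	set_bit(ring_id, (void *)s->tlb_handle_pending);
}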