i915_gem_obj_finish_shmem_access(bb->obj);
bb->accessing = false;
- i915_vma_move_to_active(bb->vma, workload->req, 0);
+ ret = i915_vma_move_to_active(bb->vma,
+                               workload->req,
+                               0);
+ if (ret)
+         goto err;
}
}
return 0;
}
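Throughout the patch the conversion has the same shape: capture the return value of i915_vma_move_to_active() and funnel any failure to a label that can still dispose of the half-built request. In the GVT-g shadow batch-buffer path above, that means bailing out through the scheduler's existing err label instead of silently dropping the error.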
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct i915_request *rq,
-                             unsigned int flags);
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+                                         struct i915_request *rq,
+                                         unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
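The declaration gains both an int return and __must_check, which in the kernel is __attribute__((warn_unused_result)), so any caller that still ignores the result now draws a compiler warning. A minimal userspace sketch of the effect; fallible_op() is a hypothetical stand-in, not a driver function:

  #include <stdio.h>

  /* In the kernel, __must_check expands to this attribute. */
  __attribute__((warn_unused_result))
  static int fallible_op(void)          /* hypothetical stand-in */
  {
          return -1;                    /* fixed stand-in error code */
  }

  int main(void)
  {
          int err;

          fallible_op();                /* warning: return value ignored */

          err = fallible_op();          /* fine: the result is consumed */
          if (err)
                  fprintf(stderr, "fallible_op failed: %d\n", err);

          return 0;
  }

With the attribute in place, the compiler itself flags any call site the series failed to convert.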
goto err_request;
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unpin(batch);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+         goto skip_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+         goto skip_request;
rq->batch = batch;
+ i915_vma_unpin(batch);
cache->rq = rq;
cache->rq_cmd = cmd;
/* Return with batch mapping (cmd) still pinned */
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
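The new skip_request label deals with a subtlety of this function: by the time tracking fails, commands have already been emitted into the ring, so the request cannot simply be freed. i915_request_skip() records the error and, as I read it, abandons the request's payload, after which the request must still pass through i915_request_add() as normal; note too that i915_vma_unpin(batch) is deferred until both tracking calls have succeeded. A self-contained sketch of that unwind shape, using hypothetical stand-in names (request_skip(), request_add(), move_to_active()):

  #include <stdio.h>

  struct request { int error; };

  static int move_to_active(struct request *rq)
  {
          (void)rq;
          return -12;                      /* pretend -ENOMEM */
  }

  static void request_skip(struct request *rq, int err)
  {
          rq->error = err;                 /* payload ignored at execution */
  }

  static void request_add(struct request *rq)
  {
          printf("request submitted, error=%d\n", rq->error);
  }

  static int emit(struct request *rq)
  {
          int err;

          err = move_to_active(rq);
          if (err)
                  goto skip_request;

          request_add(rq);
          return 0;

  skip_request:
          request_skip(rq, err);           /* mark it dead, but... */
          request_add(rq);                 /* ...still add it; never leak it */
          return err;
  }

  int main(void)
  {
          struct request rq = { 0 };

          return emit(&rq) ? 1 : 0;
  }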
unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i];
- i915_vma_move_to_active(vma, eb->request, flags);
+ err = i915_vma_move_to_active(vma, eb->request, flags);
+ if (unlikely(err)) {
+         i915_request_skip(eb->request, err);
+         return err;
+ }
__eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL;
reservation_object_unlock(resv);
}
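In eb_move_to_gpu() the failure branch is annotated unlikely(): this is the hot execbuffer submission loop. The request is skipped and the loop abandoned on the spot, since once one vma fails to be tracked there is no point moving the rest to active.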
-void i915_vma_move_to_active(struct i915_vma *vma,
-                             struct i915_request *rq,
-                             unsigned int flags)
+int i915_vma_move_to_active(struct i915_vma *vma,
+                            struct i915_request *rq,
+                            unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
const unsigned int idx = rq->engine->id;
i915_gem_active_set(&vma->last_fence, rq);
export_fence(vma, rq, flags);
+ return 0;
}
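Note that the converted function still ends in an unconditional return 0: nothing in its body can fail yet. The point of this patch is to put the error plumbing into every caller first, so that a later change can make the underlying fence/reservation bookkeeping fallible without touching the call sites again.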
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
goto err_unpin;
}
- i915_vma_move_to_active(so.vma, rq, 0);
+ err = i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
goto err_request;
}
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+         goto err_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
if (err)
        goto err_request;
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+         i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);
i915_request_add(rq);
- return 0;
+ return err;
}
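In this selftest helper the error deliberately does not short-circuit: the vma has to be unpinned and the request added no matter what, so err is simply carried through to the final return.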
static bool always_valid(struct drm_i915_private *i915)
if (err)
        goto err_request;
- i915_vma_move_to_active(batch, rq, 0);
+ err = i915_vma_move_to_active(batch, rq, 0);
+ if (err)
+         goto skip_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+         goto skip_request;
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);
i915_request_add(rq);
return 0;
+skip_request:
+ i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_batch:
return PTR_ERR(rq);
}
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_request_add(rq);
- i915_gem_object_set_active_reference(obj);
+ __i915_gem_object_release_unless_active(obj);
i915_vma_unpin(vma);
- return 0;
+
+ return err;
}
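The companion change here swaps an unconditional i915_gem_object_set_active_reference() for __i915_gem_object_release_unless_active(): if i915_vma_move_to_active() failed, the object never became active, and handing it an active reference would leave that reference unreclaimed. Roughly, and this is a paraphrase rather than the driver source:

  #include <stdbool.h>

  struct gem_object { bool active; bool has_active_ref; int refcount; };

  static void obj_put(struct gem_object *obj)
  {
          obj->refcount--;
  }

  /* Either hand the caller's reference to the active-tracking machinery
   * (to be dropped on retirement) or put it immediately if the object is
   * idle (e.g. because move_to_active() failed). */
  static void release_unless_active(struct gem_object *obj)
  {
          if (obj->active && !obj->has_active_ref)
                  obj->has_active_ref = true;   /* dropped on retire */
          else
                  obj_put(obj);
  }

  int main(void)
  {
          struct gem_object obj = { .active = false, .refcount = 1 };

          release_unless_active(&obj);  /* idle: reference dropped now */
          return obj.refcount;          /* 0 on success */
  }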
static bool assert_mmap_offset(struct drm_i915_private *i915,
i915_gem_object_set_active_reference(batch->obj);
}
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_request_get(request[id]);
i915_request_add(request[id]);
}
GEM_BUG_ON(err);
request[id]->batch = batch;
- i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[id], 0);
+ GEM_BUG_ON(err);
+
i915_gem_object_set_active_reference(batch->obj);
i915_vma_get(batch);
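The live request selftests assert GEM_BUG_ON(err) instead of unwinding: with the function still hard-wired to return 0 (see the note after its definition above), failure is impossible by construction, and GEM_BUG_ON is a debug-only check that compiles away without CONFIG_DRM_I915_DEBUG_GEM.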
if (err)
        goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+         goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+         goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
if (err)
        goto unpin_vma;
- i915_vma_move_to_active(vma, rq, 0);
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+         goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(vma->obj)) {
i915_gem_object_get(vma->obj);
i915_gem_object_set_active_reference(vma->obj);
}
- i915_vma_move_to_active(hws, rq, 0);
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+         goto unpin_hws;
+
if (!i915_gem_object_has_active_reference(hws->obj)) {
i915_gem_object_get(hws->obj);
i915_gem_object_set_active_reference(hws->obj);
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
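Both hanging-request builders gain an unpin_hws label. Once the hws vma is pinned, any later failure, including the new move_to_active errors, has to release both pins, so the error exits now funnel through unpin_hws and fall into unpin_vma below it.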
goto err_pin;
}
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+         goto err_req;
+
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
}
intel_ring_advance(rq, cs);
- i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
i915_gem_object_get(result);
i915_gem_object_set_active_reference(result);
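Finally, in the workarounds selftest the tracking call moves ahead of the register-save sequence: the EXEC_OBJECT_WRITE tracking is established (and can fail out through err_req) before any SRM commands are emitted, whereas the old call after intel_ring_advance() had no way to report a problem.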