static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
- /* XXX: we need to kill this */
- mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
- goto out_unlock;
+ goto out_signal;
batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
if (IS_ERR(batch)) {
intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
out_signal:
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
}
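With struct_mutex gone from clear_pages_worker(), the unwind path loses its unlock step: a failure before the pin now jumps straight to the fence-signalling label. Below is a compilable sketch of that goto-based unwind, assuming invented stand-ins for the driver's pin/emit/fence calls rather than the real i915 API.

```c
/* Hedged sketch of the reworked unwind, not the i915 code itself. */
#include <errno.h>
#include <stdio.h>

struct fence { int error; };

static int pin(void)       { return 0; }    /* stand-in for i915_vma_pin() */
static void unpin(void)    { }              /* stand-in for i915_vma_unpin() */
static int emit_fill(void) { return -EIO; } /* stand-in for the blt emission */

static int clear_pages_sketch(struct fence *f)
{
	int err;

	err = pin();
	if (err)
		goto out_signal;	/* was: goto out_unlock */

	err = emit_fill();
	unpin();
out_signal:
	if (err)
		f->error = err;		/* dma_fence_set_error() analogue */
	return err;
}

int main(void)
{
	struct fence f = { 0 };

	printf("err=%d fence.error=%d\n", clear_pages_sketch(&f), f.error);
	return 0;
}
```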
static int
-__intel_context_reconfigure_sseu(struct intel_context *ce,
- struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
int ret;
return ret;
}
-static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
- struct drm_i915_private *i915 = ce->engine->i915;
- int ret;
-
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ret = __intel_context_reconfigure_sseu(ce, sseu);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
-}
-
static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), gem.retire_work.work);
- /* Come back later if the device is busy... */
- if (mutex_trylock(&i915->drm.struct_mutex)) {
- i915_retire_requests(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
+ i915_retire_requests(i915);
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
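The retire worker illustrates the theme of the whole series: instead of trylocking the device-wide struct_mutex and coming back later, i915_retire_requests() can now run concurrently because timelines carry their own locking. A minimal, compilable sketch of that pattern follows; every name in it is invented for illustration and none of this is the driver code.

```c
/* Hedged sketch, not i915 code: per-timeline locks replace the global mutex. */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct request {
	struct request *next;
	bool completed;
};

struct timeline {
	pthread_mutex_t lock;	/* per-timeline, instead of one driver-wide lock */
	struct request *requests;
	struct timeline *next;
};

/* Retire completed requests on one timeline under its own lock. */
static bool timeline_retire(struct timeline *tl)
{
	bool retired = false;

	pthread_mutex_lock(&tl->lock);
	while (tl->requests && tl->requests->completed) {
		struct request *rq = tl->requests;

		tl->requests = rq->next;
		free(rq);
		retired = true;
	}
	pthread_mutex_unlock(&tl->lock);

	return retired;
}

/* The periodic worker walks every timeline; no global lock, no trylock dance. */
bool retire_requests(struct timeline *timelines)
{
	bool retired = false;

	for (struct timeline *tl = timelines; tl; tl = tl->next)
		retired |= timeline_retire(tl);

	return retired;
}
```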
{
bool result = !intel_gt_is_wedged(gt);
- do {
- if (i915_gem_wait_for_idle(gt->i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- /* XXX hide warning from gem_eio */
- if (i915_modparams.reset) {
- dev_err(gt->i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /*
- * Forcibly cancel outstanding work and leave
- * the gpu quiet.
- */
- intel_gt_set_wedged(gt);
- result = false;
+ if (i915_gem_wait_for_idle(gt->i915,
+ I915_WAIT_FOR_IDLE_BOOST,
+ I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /* XXX hide warning from gem_eio */
+ if (i915_modparams.reset) {
+ dev_err(gt->i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ GEM_TRACE_DUMP();
+ }
- }
- } while (i915_retire_requests(gt->i915) && result);
+
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ intel_gt_set_wedged(gt);
+ result = false;
+ }
if (intel_gt_pm_wait_for_idle(gt))
result = false;
user_forcewake(&i915->gt, true);
- mutex_lock(&i915->drm.struct_mutex);
-
/*
* We have to flush all the executing contexts to main memory so
* that they can be saved in the hibernation image. To ensure the last
*/
switch_to_kernel_context_sync(&i915->gt);
- mutex_unlock(&i915->drm.struct_mutex);
-
cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);
#include <linux/prime_numbers.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
static int gtt_get(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
*v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
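Both the gtt_set() and gtt_get() hunks above swap the implicit struct_mutex/runtime-pm coverage for an explicit GT wakeref held across the iomap access, released through a single out_rpm label. A self-contained sketch of that acquire/map/unwind shape, with stand-in names only:

```c
/* Hedged sketch of the acquire/map/unwind shape the gtt_set()/gtt_get()
 * hunks adopt; every name here is a stand-in, not the i915 API. */
#include <errno.h>
#include <stddef.h>

struct gt { int wakeref; };

static void gt_pm_get(struct gt *gt) { gt->wakeref++; } /* intel_gt_pm_get() analogue */
static void gt_pm_put(struct gt *gt) { gt->wakeref--; } /* intel_gt_pm_put() analogue */

static unsigned int *pin_iomap(void) { return NULL; }   /* mapping may fail */
static void unpin_iomap(void) { }

int gtt_write_sketch(struct gt *gt, size_t offset, unsigned int v)
{
	unsigned int *map;
	int err = 0;

	gt_pm_get(gt);		/* keep the device awake while touching the GGTT */

	map = pin_iomap();
	if (!map) {
		err = -ENOMEM;
		goto out_rpm;	/* the unwind drops the wakeref on every path */
	}

	map[offset] = v;
	unpin_iomap();
out_rpm:
	gt_pm_put(gt);
	return err;
}
```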
static int wc_set(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
values = offsets + ncachelines;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto unlock;
+ goto free;
}
i915_random_reorder(offsets, ncachelines, &prng);
}
}
}
-unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+free:
kfree(offsets);
return err;
put_object:
i915_gem_object_put(obj);
- goto unlock;
+ goto free;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
static int __live_parallel_switch1(void *data)
{
struct parallel_switch *arg = data;
- struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
IGT_TIMEOUT(end_time);
unsigned long count;
for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
i915_request_put(rq);
- mutex_lock(&i915->drm.struct_mutex);
rq = i915_request_create(arg->ce[n]);
- if (IS_ERR(rq)) {
- mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(rq))
return PTR_ERR(rq);
- }
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
}
err = 0;
static int __live_parallel_switchN(void *data)
{
struct parallel_switch *arg = data;
- struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
IGT_TIMEOUT(end_time);
unsigned long count;
int n;
for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
struct i915_request *rq;
- mutex_lock(&i915->drm.struct_mutex);
rq = i915_request_create(arg->ce[n]);
- if (IS_ERR(rq)) {
- mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(rq))
return PTR_ERR(rq);
- }
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
}
count++;
if (ret)
return ret;
- ret = __intel_context_reconfigure_sseu(ce, sseu);
+ ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
goto out_fail;
out_fail:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
ret = -EIO;
intel_context_unpin(ce);
static void restore_retire_worker(struct drm_i915_private *i915)
{
+ igt_flush_test(i915);
intel_gt_pm_put(&i915->gt);
-
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_driver_register__shrinker(i915);
}
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_fill_blt(obj, ce, val);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
dst->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_copy_blt(src, dst, ce);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
static void wait_for_idle(struct intel_gt *gt)
{
- mutex_lock(&gt->i915->drm.struct_mutex); /* XXX */
- do {
- if (i915_gem_wait_for_idle(gt->i915,
- I915_WAIT_LOCKED,
- I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- /* XXX hide warning from gem_eio */
- if (i915_modparams.reset) {
- dev_err(gt->i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /*
- * Forcibly cancel outstanding work and leave
- * the gpu quiet.
- */
- intel_gt_set_wedged(gt);
- }
- } while (i915_retire_requests(gt->i915));
- mutex_unlock(&gt->i915->drm.struct_mutex);
+ if (i915_gem_wait_for_idle(gt->i915, 0,
+ I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ intel_gt_set_wedged(gt);
+ }
intel_gt_pm_wait_for_idle(gt);
}
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
memset(h, 0, sizeof(*h));
h->gt = gt;
+ mutex_lock(&gt->i915->drm.struct_mutex);
h->ctx = kernel_context(gt->i915);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
kernel_context_close(h->ctx);
- igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->gt->i915);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
/* Basic check that we can execute our hanging batch */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
for_each_engine(engine, gt->i915, id) {
struct intel_wedge_me w;
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
reset_count = i915_reset_count(global);
count = 0;
do {
- mutex_lock(&gt->i915->drm.struct_mutex);
-
for_each_engine(engine, gt->i915, id) {
int i;
intel_gt_reset(gt, ALL_ENGINES, NULL);
igt_global_reset_unlock(gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
break;
}
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
i915_request_add(rq);
}
err = intel_engine_reset(engine, NULL);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
return 0;
if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
}
if (active) {
struct i915_request *rq;
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (active)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
struct i915_request *old = rq[idx];
struct i915_request *new;
- mutex_lock(&engine->i915->drm.struct_mutex);
new = igt_request_alloc(ctx[idx], engine);
if (IS_ERR(new)) {
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
rq[idx] = i915_request_get(new);
i915_request_add(new);
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = active_request_put(old);
if (err)
return 0;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
if (err)
break;
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (flags & TEST_ACTIVE)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
/* Check that we can recover an unbind stuck on a hanging request */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
if (err)
goto out_rq;
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
put_task_struct(tsk);
}
- mutex_lock(&gt->i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
i915_gem_object_put(obj);
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (intel_gt_is_wedged(gt))
return -EIO;
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
i915_request_put(prev);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
- mutex_lock(&gt->i915->drm.struct_mutex);
-
err = hang_init(&h, gt);
if (err)
- goto err_unlock;
+ return err;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
goto err_request;
}
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
/* Temporarily disable error capture */
error = xchg(&global->first_error, (void *)-1);
xchg(&global->first_error, error);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
err = -EINVAL;
i915_request_put(rq);
err_fini:
hang_fini(&h);
-err_unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
return 0;
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
out:
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
-
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
return err;
err = intel_gt_live_subtests(tests, gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
- igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
igt_spinner_end(&spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(i915)) {
err = -EIO;
goto err_ctx;
}
if (err)
goto out;
- if (i915_request_wait(head,
- I915_WAIT_LOCKED,
+ if (i915_request_wait(head, 0,
2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
count, n);
if (err)
goto err_pin;
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(i915)) {
err = -EIO;
goto err_pin;
}
goto err_wedged;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
goto err_wedged;
}
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
goto err_wedged;
intel_engine_pm_get(engine);
}
intel_engine_pm_put(engine);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
goto err_wedged;
}
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
goto err_wedged;
if (engine->execlists.preempt_hang.count) {
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(i915)) {
err = -EIO;
goto err_ctx_lo;
}
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
goto out;
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
for (n = 0; n < nsibling; n++)
out:
for (n = 0; !IS_ERR(rq[n]); n++)
i915_request_put(rq[n]);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
kernel_context_close(ctx);
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
+#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "../selftests/i915_random.h"
goto err_put;
}
- mutex_lock(&state.i915->drm.struct_mutex);
for (p = phases; p->name; p++) {
pr_debug("%s(%s)\n", __func__, p->name);
for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
out:
for (na = 0; na < state.max; na++)
__mock_hwsp_record(&state, na, NULL);
- mutex_unlock(&state.i915->drm.struct_mutex);
kfree(state.history);
err_put:
drm_dev_put(&state.i915->drm);
struct i915_request *rq;
int err;
- lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
-
err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
if (IS_ERR(rq))
goto out_unpin;
+ i915_request_get(rq);
+
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
i915_request_add(rq);
- if (err)
+ if (err) {
+ i915_request_put(rq);
rq = ERR_PTR(err);
+ }
out_unpin:
intel_timeline_unpin(tl);
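The i915_request_get() added before emit_ggtt_store_dw() encodes the rule this series leans on everywhere: after i915_request_add() the request may be retired and freed at any moment, so a caller that still needs it must take its own reference first. A self-contained refcounting sketch of that get-before-add discipline (invented names, not the driver API):

```c
/* Hedged sketch of the get-before-add rule; every name is invented. */
#include <stdatomic.h>
#include <stdlib.h>

struct request {
	atomic_int refcount;	/* starts at 1: the creation reference */
};

static struct request *request_get(struct request *rq)
{
	atomic_fetch_add(&rq->refcount, 1);
	return rq;
}

static void request_put(struct request *rq)
{
	if (atomic_fetch_sub(&rq->refcount, 1) == 1)
		free(rq);
}

/* Submission hands the creation reference to the retire path, which may
 * drop it (and free the request) immediately. */
static void request_add(struct request *rq)
{
	request_put(rq);	/* models retirement racing with the caller */
}

int submit_and_use(void)
{
	struct request *rq = calloc(1, sizeof(*rq));

	if (!rq)
		return -1;
	atomic_store(&rq->refcount, 1);

	request_get(rq);	/* keep rq alive across request_add() */
	request_add(rq);

	/* ...rq is still safe to inspect here... */
	request_put(rq);
	return 0;
}
```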
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
for (n = 0; n < NUM_TIMELINES; n++) {
struct intel_timeline *tl;
struct i915_request *rq;
tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
+
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
}
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
for (n = 0; n < count; n++) {
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
for_each_engine(engine, i915, id) {
tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
+ intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
+ intel_engine_pm_get(engine);
rq = tl_write(tl, engine, count);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
for (n = 0; n < count; n++) {
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
struct intel_engine_cs *engine;
struct intel_timeline *tl;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = 0;
/*
* foreign GPU references.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
tl = intel_timeline_create(&i915->gt, NULL);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out_rpm;
- }
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
intel_timeline_unpin(tl);
out_free:
intel_timeline_put(tl);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count;
int err = 0;
* want to confuse ourselves or the GPU.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
do {
struct intel_timeline *tl;
struct i915_request *rq;
tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
+ i915_request_put(rq);
intel_timeline_put(tl);
err = -EIO;
- goto out;
+ break;
}
if (*tl->hwsp_seqno != count) {
err = -EINVAL;
}
+ i915_request_put(rq);
intel_timeline_put(tl);
count++;
if (err)
- goto out;
+ break;
} while (!__igt_timeout(end_time, NULL));
- }
-out:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
return err;
}
break;
}
- if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(ctx->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
kernel_context_close(client[i].ctx);
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
return err;
igt_global_reset_unlock(&i915->gt);
kernel_context_close(ctx);
- igt_flush_test(i915, I915_WAIT_LOCKED);
+ igt_flush_test(i915);
return ret;
}
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ int ret;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
I915_IDLE_ENGINES_TIMEOUT))
intel_gt_set_wedged(&i915->gt);
- /* No need to check and wait for gpu resets, only libdrm auto-restarts
- * on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
- int ret;
+ if (val & DROP_RETIRE)
+ i915_retire_requests(i915);
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (val & (DROP_IDLE | DROP_ACTIVE)) {
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
+ }
- /*
- * To finish the flush of the idle_worker, we must complete
- * the switch-to-kernel-context, which requires a double
- * pass through wait_for_idle: first queues the switch,
- * second waits for the switch.
- */
- if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
-
- if (ret == 0 && val & DROP_IDLE)
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
-
- if (val & DROP_RETIRE)
- i915_retire_requests(i915);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- if (ret == 0 && val & DROP_IDLE)
- ret = intel_gt_pm_wait_for_idle(&i915->gt);
+ if (val & DROP_IDLE) {
+ ret = intel_gt_pm_wait_for_idle(&i915->gt);
+ if (ret)
+ return ret;
}
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
if (!intel_gt_pm_is_awake(gt))
return 0;
- GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
- flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
-
- timeout = wait_for_timelines(gt, flags, timeout);
- if (timeout < 0)
- return timeout;
+ do {
+ timeout = wait_for_timelines(gt, flags, timeout);
+ if (timeout < 0)
+ return timeout;
- if (flags & I915_WAIT_LOCKED) {
- lockdep_assert_held(&i915->drm.struct_mutex);
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
- i915_retire_requests(i915);
- }
+ } while (i915_retire_requests(i915));
return 0;
}
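The reworked i915_gem_wait_for_idle() replaces the I915_WAIT_LOCKED-guarded retire with an open loop: wait on the timelines, retire, and repeat until a retirement pass makes no progress (the real hunk also rechecks for pending signals between passes). A compilable sketch of that flush-until-stable loop, with all names stood in:

```c
/* Hedged sketch of the wait/retire loop; all names are stand-ins. */
#include <stdbool.h>

struct dev { int pending; };

static long wait_for_timelines(struct dev *d, long timeout)
{
	(void)d;
	return timeout;			/* pretend every wait succeeds */
}

static bool retire_requests(struct dev *d)
{
	return d->pending && d->pending--;	/* progress while work remains */
}

int wait_for_idle_sketch(struct dev *d, long timeout)
{
	do {
		timeout = wait_for_timelines(d, timeout);
		if (timeout < 0)
			return (int)timeout;
	} while (retire_requests(d));	/* stop once a pass retires nothing */

	return 0;
}
```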
long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
-#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
-#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
static inline bool i915_request_signaled(const struct i915_request *rq)
{
__live_put(active);
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
return PTR_ERR(active);
/* waits for & retires all requests */
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing!\n");
mutex_lock(&i915->ggtt.vm.mutex);
out_locked:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
err = i915_subtests(tests, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
}
}
end:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
i915_gem_context_unlock_engines(ctx);
i915_gem_object_unpin_map(out);
{
struct drm_i915_private *i915 = arg;
struct i915_request *request;
- int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
- goto out_unlock;
+ return -ENOMEM;
i915_request_add(request);
- err = 0;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ if (!request)
+ return -ENOMEM;
+
i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
err = 0;
out_request:
i915_request_put(request);
-out_unlock:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
/* Submit a request, treat it as a fence and wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_locked;
- }
+ if (!request)
+ return -ENOMEM;
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_locked;
+ goto out;
}
i915_request_add(request);
- mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
pr_err("fence signaled immediately!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
pr_err("fence wait success after submit (expected timeout)!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out (expected success)!\n");
- goto out_device;
+ goto out;
}
if (!dma_fence_is_signaled(&request->fence)) {
pr_err("fence unsignaled after waiting!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out when complete (expected success)!\n");
- goto out_device;
+ goto out;
}
err = 0;
-out_device:
- mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
+ mutex_unlock(&i915->drm.struct_mutex);
+
ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce));
request = mock_request(ce, 2 * HZ);
i915_request_get(request);
i915_request_add(request);
+ mutex_lock(&i915->drm.struct_mutex);
ctx[1] = mock_context(i915, "B");
+ mutex_unlock(&i915->drm.struct_mutex);
+
ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce));
vip = mock_request(ce, 0);
request->engine->submit_request(request);
rcu_read_unlock();
- mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request\n");
err = 0;
err:
i915_request_put(vip);
- mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int __igt_breadcrumbs_smoketest(void *arg)
{
struct smoketest *t = arg;
- struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
struct i915_request *rq;
struct intel_context *ce;
- mutex_lock(BKL);
-
ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
rq = t->request_alloc(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
- mutex_unlock(BKL);
err = PTR_ERR(rq);
count = n;
break;
requests[n] = i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(BKL);
-
if (err >= 0)
err = i915_sw_fence_await_dma_fence(wait,
&rq->fence,
goto out_threads;
}
- mutex_lock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < t.ncontexts; n++) {
+ mutex_lock(&t.engine->i915->drm.struct_mutex);
t.contexts[n] = mock_context(t.engine->i915, "mock");
+ mutex_unlock(&t.engine->i915->drm.struct_mutex);
if (!t.contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
}
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < ncpus; n++) {
threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
atomic_long_read(&t.num_fences),
ncpus);
- mutex_lock(&t.engine->i915->drm.struct_mutex);
out_contexts:
for (n = 0; n < t.ncontexts; n++) {
if (!t.contexts[n])
break;
mock_context_close(t.contexts[n]);
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
kfree(t.contexts);
out_threads:
kfree(threads);
-
return ret;
}
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
unsigned int id;
int err = -ENODEV;
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
for_each_engine(engine, i915, id) {
- struct i915_request *request = NULL;
unsigned long n, prime;
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ return err;
for_each_prime_number_from(prime, 1, 8192) {
+ struct i915_request *request = NULL;
+
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = i915_request_create(engine->kernel_context);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
- goto out_unlock;
- }
+ if (IS_ERR(request))
+ return PTR_ERR(request);
/* This space is left intentionally blank.
*
* for latency.
*/
+ i915_request_get(request);
i915_request_add(request);
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ return err;
pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
prime, div64_u64(ktime_to_ns(times[1]), prime));
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
if (err)
goto out_request;
+ i915_request_get(request);
out_request:
i915_request_add(request);
return err ? ERR_PTR(err) : request;
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
unsigned int id;
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
batch = empty_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_unlock;
- }
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
if (__igt_timeout(end_time, NULL))
break;
}
+ i915_request_put(request);
err = igt_live_test_end(&t);
if (err)
out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_request *request[I915_NUM_ENGINES];
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
unsigned int id;
* block doing so, and that they don't complete too soon.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ return err;
batch = recursive_batch(i915);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_unlock;
+ return err;
}
for_each_engine(engine, i915, id) {
i915_request_put(request[id]);
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
struct i915_request *request[I915_NUM_ENGINES] = {};
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
unsigned int id;
int err;
* they are running on independent engines.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ return err;
for_each_engine(engine, i915, id) {
struct i915_vma *batch;
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
__func__, engine->name, err);
- goto out_unlock;
+ return err;
}
request[id] = i915_request_create(engine->kernel_context);
i915_vma_put(request[id]->batch);
i915_request_put(request[id]);
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
struct i915_request *rq;
int err;
- mutex_lock(&engine->i915->drm.struct_mutex);
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq)) {
- mutex_unlock(&engine->i915->drm.struct_mutex);
+ if (IS_ERR(rq))
return PTR_ERR(rq);
- }
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = 0;
if (i915_request_wait(rq, 0, HZ / 5) < 0)
do {
struct i915_request *rq;
- mutex_lock(&engine->i915->drm.struct_mutex);
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq)) {
- mutex_unlock(&engine->i915->drm.struct_mutex);
+ if (IS_ERR(rq))
return PTR_ERR(rq);
- }
i915_request_add(rq);
- mutex_unlock(&engine->i915->drm.struct_mutex);
-
count++;
} while (!__igt_timeout(end_time, NULL));
struct task_struct *tsk[I915_NUM_ENGINES] = {};
struct igt_live_test t;
- mutex_lock(&i915->drm.struct_mutex);
err = igt_live_test_begin(&t, i915, __func__, "");
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
break;
put_task_struct(tsk[id]);
}
- mutex_lock(&i915->drm.struct_mutex);
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
}
return err;
goto out_threads;
}
- mutex_lock(&i915->drm.struct_mutex);
for (n = 0; n < t[0].ncontexts; n++) {
+ mutex_lock(&i915->drm.struct_mutex);
t[0].contexts[n] = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
if (!t[0].contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
t[id].max_batch = max_batches(t[0].contexts[0], engine);
if (t[id].max_batch < 0) {
ret = t[id].max_batch;
- mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
/* One ring interleaved between requests from all cpus */
&t[id], "igt/%d.%d", id, n);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
- mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
threads[id * ncpus + n] = tsk;
}
}
- mutex_unlock(&i915->drm.struct_mutex);
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
- mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
- mutex_unlock(&i915->drm.struct_mutex);
kfree(t[0].contexts);
out_threads:
kfree(threads);
{
struct drm_i915_private *i915 = data;
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
{
struct intel_gt *gt = data;
- mutex_lock(&gt->i915->drm.struct_mutex);
- if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
- mutex_unlock(&gt->i915->drm.struct_mutex);
i915_gem_drain_freed_objects(gt->i915);
err = i915_subtests(tests, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
#include "igt_flush_test.h"
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+int igt_flush_test(struct drm_i915_private *i915)
{
int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
- int repeat = !!(flags & I915_WAIT_LOCKED);
cond_resched();
- do {
- if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ i915_retire_requests(i915);
+ if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- GEM_TRACE("%pS timed out.\n",
- __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
- repeat = 0;
- ret = -EIO;
- }
-
- /* Ensure we also flush after wedging. */
- if (flags & I915_WAIT_LOCKED)
- i915_retire_requests(i915);
- } while (repeat--);
+ intel_gt_set_wedged(&i915->gt);
+ ret = -EIO;
+ }
+ i915_retire_requests(i915);
return ret;
}
struct drm_i915_private;
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+int igt_flush_test(struct drm_i915_private *i915);
#endif /* IGT_FLUSH_TEST_H */
enum intel_engine_id id;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
t->i915 = i915;
t->func = func;
t->name = name;
err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
+ I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
return -EIO;
if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
do {
for_each_engine(engine, i915, id)
mock_engine_flush(engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
flush_work(&i915->gem.idle_work);
i915_gem_drain_workqueue(i915);