drm/i915/guc: Remove preemption support for current fw
authorChris Wilson <chris@chris-wilson.co.uk>
Wed, 10 Jul 2019 00:54:26 +0000 (17:54 -0700)
committerChris Wilson <chris@chris-wilson.co.uk>
Thu, 11 Jul 2019 10:09:33 +0000 (11:09 +0100)
Preemption via GuC submission is not being supported with its current
legacy incarnation. The current FW does support a similar preemption
flow via H2G, but it is class-based instead of being instance-based,
which doesn't fit well with the i915 tracking. To fix this, the
firmware is being updated to better support our needs with a new flow,
so we can safely remove the old code.

v2 (Daniele): resurrect & rebase, reword commit message, remove
preempt_context as well

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: MichaƂ Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190710005437.3496-2-daniele.ceraolospurio@intel.com
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_guc.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/selftests/intel_guc.c

index e367dce2a696f6726943a23b4870aa7573028411..078592912d974820d411e6685c6aea826bcfa13d 100644 (file)
@@ -644,18 +644,12 @@ static void init_contexts(struct drm_i915_private *i915)
        init_llist_head(&i915->contexts.free_list);
 }
 
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
-       return USES_GUC_SUBMISSION(i915);
-}
-
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
        struct i915_gem_context *ctx;
 
        /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
-       GEM_BUG_ON(dev_priv->preempt_context);
 
        init_contexts(dev_priv);
 
@@ -676,15 +670,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
        dev_priv->kernel_context = ctx;
 
-       /* highest priority; preempting task */
-       if (needs_preempt_context(dev_priv)) {
-               ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
-               if (!IS_ERR(ctx))
-                       dev_priv->preempt_context = ctx;
-               else
-                       DRM_ERROR("Failed to create preempt context; disabling preemption\n");
-       }
-
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         DRIVER_CAPS(dev_priv)->has_logical_contexts ?
                         "logical" : "fake");
@@ -695,8 +680,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
        lockdep_assert_held(&i915->drm.struct_mutex);
 
-       if (i915->preempt_context)
-               destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);
 
        /* Must free all deferred contexts (via flush_workqueue) first */
index bdf279fa3b2e1e2de2494800c683eac2afa7ad7d..76b5c068a26d4f372e74b368138bd39ab1d3d289 100644 (file)
@@ -841,15 +841,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
-       /*
-        * Similarly the preempt context must always be available so that
-        * we can interrupt the engine at any time. However, as preemption
-        * is optional, we allow it to fail.
-        */
-       if (i915->preempt_context)
-               pin_context(i915->preempt_context, engine,
-                           &engine->preempt_context);
-
        ret = measure_breadcrumb_dw(engine);
        if (ret < 0)
                goto err_unpin;
@@ -861,8 +852,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
        return 0;
 
 err_unpin:
-       if (engine->preempt_context)
-               intel_context_unpin(engine->preempt_context);
        intel_context_unpin(engine->kernel_context);
        return ret;
 }
@@ -887,8 +876,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        if (engine->default_state)
                i915_gem_object_put(engine->default_state);
 
-       if (engine->preempt_context)
-               intel_context_unpin(engine->preempt_context);
        intel_context_unpin(engine->kernel_context);
        GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
 
index 7e056114344e4bc8ba35af5ac7cba3bd5dfe8a16..8be63019d70776bca653807a796e31a518361562 100644 (file)
@@ -288,7 +288,6 @@ struct intel_engine_cs {
        struct llist_head barrier_tasks;
 
        struct intel_context *kernel_context; /* pinned */
-       struct intel_context *preempt_context; /* pinned; optional */
 
        intel_engine_mask_t saturated; /* submitting semaphores too late? */
 
index 36ba80e6a0b7fda224253061d17c2b3faf6e95fc..da81b3a92d16f66e2ecf959eb3801d2ac28cc50a 100644 (file)
@@ -145,10 +145,6 @@ int intel_gt_resume(struct intel_gt *gt)
                if (ce)
                        ce->ops->reset(ce);
 
-               ce = engine->preempt_context;
-               if (ce)
-                       ce->ops->reset(ce);
-
                engine->serial++; /* kernel context lost */
                err = engine->resume(engine);
 
index 3e4f58f193628f57bd1b77713e437ceb580250a8..b4d1956778776b28e039aa1413cfa1ea40fc1cd1 100644 (file)
@@ -2010,11 +2010,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
        seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
        i915_guc_client_info(m, dev_priv, guc->execbuf_client);
-       if (guc->preempt_client) {
-               seq_printf(m, "\nGuC preempt client @ %p:\n",
-                          guc->preempt_client);
-               i915_guc_client_info(m, dev_priv, guc->preempt_client);
-       }
 
        /* Add more as required ... */
 
index a9381e404fd5e7d5c4758398fd2d3b7e5ffac08b..2fa1d35efcb8be99879a2aa391ffe7c0b55651d8 100644 (file)
@@ -1378,8 +1378,6 @@ struct drm_i915_private {
        struct intel_engine_cs *engine[I915_NUM_ENGINES];
        /* Context used internally to idle the GPU and setup initial state */
        struct i915_gem_context *kernel_context;
-       /* Context only to be used for injecting preemption commands */
-       struct i915_gem_context *preempt_context;
        struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
                                            [MAX_ENGINE_INSTANCE + 1];
 
index c40a6efdd33a8130b0fcb91be090d93615e3a298..501b74f44374655824e6e712c0be6ced3d321bfe 100644 (file)
@@ -101,8 +101,6 @@ void intel_guc_init_early(struct intel_guc *guc)
 
 static int guc_init_wq(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
        /*
         * GuC log buffer flush work item has to do register access to
         * send the ack to GuC and this work item, if not synced before
@@ -122,31 +120,6 @@ static int guc_init_wq(struct intel_guc *guc)
                return -ENOMEM;
        }
 
-       /*
-        * Even though both sending GuC action, and adding a new workitem to
-        * GuC workqueue are serialized (each with its own locking), since
-        * we're using mutliple engines, it's possible that we're going to
-        * issue a preempt request with two (or more - each for different
-        * engine) workitems in GuC queue. In this situation, GuC may submit
-        * all of them, which will make us very confused.
-        * Our preemption contexts may even already be complete - before we
-        * even had the chance to sent the preempt action to GuC!. Rather
-        * than introducing yet another lock, we can just use ordered workqueue
-        * to make sure we're always sending a single preemption request with a
-        * single workitem.
-        */
-       if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-           USES_GUC_SUBMISSION(dev_priv)) {
-               guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
-                                                         WQ_HIGHPRI);
-               if (!guc->preempt_wq) {
-                       destroy_workqueue(guc->log.relay.flush_wq);
-                       DRM_ERROR("Couldn't allocate workqueue for GuC "
-                                 "preemption\n");
-                       return -ENOMEM;
-               }
-       }
-
        return 0;
 }
 
@@ -154,10 +127,6 @@ static void guc_fini_wq(struct intel_guc *guc)
 {
        struct workqueue_struct *wq;
 
-       wq = fetch_and_zero(&guc->preempt_wq);
-       if (wq)
-               destroy_workqueue(wq);
-
        wq = fetch_and_zero(&guc->log.relay.flush_wq);
        if (wq)
                destroy_workqueue(wq);
index d91c96679dbb991275f72d43da0f5b9d90cad22e..ec1038c1f50e3327001dd92b20fa62056d0b2096 100644 (file)
 
 struct __guc_ads_blob;
 
-struct guc_preempt_work {
-       struct work_struct work;
-       struct intel_engine_cs *engine;
-};
-
 /*
  * Top level structure of GuC. It handles firmware loading and manages client
  * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
@@ -76,10 +71,6 @@ struct intel_guc {
        void *shared_data_vaddr;
 
        struct intel_guc_client *execbuf_client;
-       struct intel_guc_client *preempt_client;
-
-       struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
-       struct workqueue_struct *preempt_wq;
 
        DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
        /* Cyclic counter mod pagesize  */
index f104b94c14ef8da564cea8f10d96a0cc712f61a8..8520bb224175558a1438a893d82a6069cd693d68 100644 (file)
@@ -46,11 +46,10 @@ enum {
  *
  * GuC client:
  * A intel_guc_client refers to a submission path through GuC. Currently, there
- * are two clients. One of them (the execbuf_client) is charged with all
- * submissions to the GuC, the other one (preempt_client) is responsible for
- * preempting the execbuf_client. This struct is the owner of a doorbell, a
- * process descriptor and a workqueue (all of them inside a single gem object
- * that contains all required pages for these elements).
+ * is only one client, which is charged with all submissions to the GuC. This
+ * struct is the owner of a doorbell, a process descriptor and a workqueue (all
+ * of them inside a single gem object that contains all required pages for these
+ * elements).
  *
  * GuC stage descriptor:
  * During initialization, the driver allocates a static pool of 1024 such
@@ -88,12 +87,6 @@ enum {
  *
  */
 
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
-       return (i915_ggtt_offset(engine->status_page.vma) +
-               I915_GEM_HWS_PREEMPT_ADDR);
-}
-
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
        return rb_entry(rb, struct i915_priolist, node);
@@ -563,126 +556,6 @@ static void flush_ggtt_writes(struct i915_vma *vma)
                intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
 }
 
-static void inject_preempt_context(struct work_struct *work)
-{
-       struct guc_preempt_work *preempt_work =
-               container_of(work, typeof(*preempt_work), work);
-       struct intel_engine_cs *engine = preempt_work->engine;
-       struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
-                                            preempt_work[engine->id]);
-       struct intel_guc_client *client = guc->preempt_client;
-       struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-       struct intel_context *ce = engine->preempt_context;
-       u32 data[7];
-
-       if (!ce->ring->emit) { /* recreate upon load/resume */
-               u32 addr = intel_hws_preempt_done_address(engine);
-               u32 *cs;
-
-               cs = ce->ring->vaddr;
-               if (engine->class == RENDER_CLASS) {
-                       cs = gen8_emit_ggtt_write_rcs(cs,
-                                                     GUC_PREEMPT_FINISHED,
-                                                     addr,
-                                                     PIPE_CONTROL_CS_STALL);
-               } else {
-                       cs = gen8_emit_ggtt_write(cs,
-                                                 GUC_PREEMPT_FINISHED,
-                                                 addr,
-                                                 0);
-                       *cs++ = MI_NOOP;
-                       *cs++ = MI_NOOP;
-               }
-               *cs++ = MI_USER_INTERRUPT;
-               *cs++ = MI_NOOP;
-
-               ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
-               GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
-
-               flush_ggtt_writes(ce->ring->vma);
-       }
-
-       spin_lock_irq(&client->wq_lock);
-       guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
-                          GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
-       spin_unlock_irq(&client->wq_lock);
-
-       /*
-        * If GuC firmware performs an engine reset while that engine had
-        * a preemption pending, it will set the terminated attribute bit
-        * on our preemption stage descriptor. GuC firmware retains all
-        * pending work items for a high-priority GuC client, unlike the
-        * normal-priority GuC client where work items are dropped. It
-        * wants to make sure the preempt-to-idle work doesn't run when
-        * scheduling resumes, and uses this bit to inform its scheduler
-        * and presumably us as well. Our job is to clear it for the next
-        * preemption after reset, otherwise that and future preemptions
-        * will never complete. We'll just clear it every time.
-        */
-       stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
-
-       data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
-       data[1] = client->stage_id;
-       data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
-                 INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
-       data[3] = engine->guc_id;
-       data[4] = guc->execbuf_client->priority;
-       data[5] = guc->execbuf_client->stage_id;
-       data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
-
-       if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
-               intel_write_status_page(engine,
-                                       I915_GEM_HWS_PREEMPT,
-                                       GUC_PREEMPT_NONE);
-               tasklet_schedule(&engine->execlists.tasklet);
-       }
-
-       (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
-}
-
-/*
- * We're using user interrupt and HWSP value to mark that preemption has
- * finished and GPU is idle. Normally, we could unwind and continue similar to
- * execlists submission path. Unfortunately, with GuC we also need to wait for
- * it to finish its own postprocessing, before attempting to submit. Otherwise
- * GuC may silently ignore our submissions, and thus we risk losing request at
- * best, executing out-of-order and causing kernel panic at worst.
- */
-#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
-static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
-{
-       struct intel_guc *guc = &engine->i915->guc;
-       struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
-       struct guc_ctx_report *report =
-               &data->preempt_ctx_report[engine->guc_id];
-
-       if (wait_for_atomic(report->report_return_status ==
-                           INTEL_GUC_REPORT_STATUS_COMPLETE,
-                           GUC_PREEMPT_POSTPROCESS_DELAY_MS))
-               DRM_ERROR("Timed out waiting for GuC preemption report\n");
-       /*
-        * GuC is expecting that we're also going to clear the affected context
-        * counter, let's also reset the return status to not depend on GuC
-        * resetting it after recieving another preempt action
-        */
-       report->affected_count = 0;
-       report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
-}
-
-static void complete_preempt_context(struct intel_engine_cs *engine)
-{
-       struct intel_engine_execlists *execlists = &engine->execlists;
-
-       if (inject_preempt_hang(execlists))
-               return;
-
-       execlists_cancel_port_requests(execlists);
-       execlists_unwind_incomplete_requests(execlists);
-
-       wait_for_guc_preempt_report(engine);
-       intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, GUC_PREEMPT_NONE);
-}
-
 static void guc_submit(struct intel_engine_cs *engine,
                       struct i915_request **out,
                       struct i915_request **end)
@@ -707,16 +580,6 @@ static inline int rq_prio(const struct i915_request *rq)
        return rq->sched.attr.priority | __NO_PREEMPTION;
 }
 
-static inline int effective_prio(const struct i915_request *rq)
-{
-       int prio = rq_prio(rq);
-
-       if (i915_request_has_nopreempt(rq))
-               prio = I915_PRIORITY_UNPREEMPTABLE;
-
-       return prio;
-}
-
 static struct i915_request *schedule_in(struct i915_request *rq, int idx)
 {
        trace_i915_request_in(rq, idx);
@@ -752,22 +615,6 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
        lockdep_assert_held(&engine->active.lock);
 
        if (last) {
-               if (intel_engine_has_preemption(engine)) {
-                       struct guc_preempt_work *preempt_work =
-                               &engine->i915->guc.preempt_work[engine->id];
-                       int prio = execlists->queue_priority_hint;
-
-                       if (i915_scheduler_need_preempt(prio,
-                                                       effective_prio(last))) {
-                               intel_write_status_page(engine,
-                                                       I915_GEM_HWS_PREEMPT,
-                                                       GUC_PREEMPT_INPROGRESS);
-                               queue_work(engine->i915->guc.preempt_wq,
-                                          &preempt_work->work);
-                               return;
-                       }
-               }
-
                if (*++first)
                        return;
 
@@ -831,12 +678,7 @@ static void guc_submission_tasklet(unsigned long data)
                memmove(execlists->inflight, port, rem * sizeof(*port));
        }
 
-       if (intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
-           GUC_PREEMPT_FINISHED)
-               complete_preempt_context(engine);
-
-       if (!intel_read_status_page(engine, I915_GEM_HWS_PREEMPT))
-               __guc_dequeue(engine);
+       __guc_dequeue(engine);
 
        spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -857,16 +699,6 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
         * prevents the race.
         */
        __tasklet_disable_sync_once(&execlists->tasklet);
-
-       /*
-        * We're using worker to queue preemption requests from the tasklet in
-        * GuC submission mode.
-        * Even though tasklet was disabled, we may still have a worker queued.
-        * Let's make sure that all workers scheduled before disabling the
-        * tasklet are completed before continuing with the reset.
-        */
-       if (engine->i915->guc.preempt_wq)
-               flush_workqueue(engine->i915->guc.preempt_wq);
 }
 
 static void guc_reset(struct intel_engine_cs *engine, bool stalled)
@@ -1123,7 +955,6 @@ static int guc_clients_create(struct intel_guc *guc)
        struct intel_guc_client *client;
 
        GEM_BUG_ON(guc->execbuf_client);
-       GEM_BUG_ON(guc->preempt_client);
 
        client = guc_client_alloc(dev_priv,
                                  INTEL_INFO(dev_priv)->engine_mask,
@@ -1135,20 +966,6 @@ static int guc_clients_create(struct intel_guc *guc)
        }
        guc->execbuf_client = client;
 
-       if (dev_priv->preempt_context) {
-               client = guc_client_alloc(dev_priv,
-                                         INTEL_INFO(dev_priv)->engine_mask,
-                                         GUC_CLIENT_PRIORITY_KMD_HIGH,
-                                         dev_priv->preempt_context);
-               if (IS_ERR(client)) {
-                       DRM_ERROR("Failed to create GuC client for preemption!\n");
-                       guc_client_free(guc->execbuf_client);
-                       guc->execbuf_client = NULL;
-                       return PTR_ERR(client);
-               }
-               guc->preempt_client = client;
-       }
-
        return 0;
 }
 
@@ -1156,10 +973,6 @@ static void guc_clients_destroy(struct intel_guc *guc)
 {
        struct intel_guc_client *client;
 
-       client = fetch_and_zero(&guc->preempt_client);
-       if (client)
-               guc_client_free(client);
-
        client = fetch_and_zero(&guc->execbuf_client);
        if (client)
                guc_client_free(client);
@@ -1202,28 +1015,11 @@ static void __guc_client_disable(struct intel_guc_client *client)
 
 static int guc_clients_enable(struct intel_guc *guc)
 {
-       int ret;
-
-       ret = __guc_client_enable(guc->execbuf_client);
-       if (ret)
-               return ret;
-
-       if (guc->preempt_client) {
-               ret = __guc_client_enable(guc->preempt_client);
-               if (ret) {
-                       __guc_client_disable(guc->execbuf_client);
-                       return ret;
-               }
-       }
-
-       return 0;
+       return __guc_client_enable(guc->execbuf_client);
 }
 
 static void guc_clients_disable(struct intel_guc *guc)
 {
-       if (guc->preempt_client)
-               __guc_client_disable(guc->preempt_client);
-
        if (guc->execbuf_client)
                __guc_client_disable(guc->execbuf_client);
 }
@@ -1234,9 +1030,6 @@ static void guc_clients_disable(struct intel_guc *guc)
  */
 int intel_guc_submission_init(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
        int ret;
 
        if (guc->stage_desc_pool)
@@ -1256,11 +1049,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
        if (ret)
                goto err_pool;
 
-       for_each_engine(engine, dev_priv, id) {
-               guc->preempt_work[id].engine = engine;
-               INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
-       }
-
        return 0;
 
 err_pool:
@@ -1270,13 +1058,6 @@ err_pool:
 
 void intel_guc_submission_fini(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       for_each_engine(engine, dev_priv, id)
-               cancel_work_sync(&guc->preempt_work[id].work);
-
        guc_clients_destroy(guc);
        WARN_ON(!guc_verify_doorbells(guc));
 
index 6ca8584cd64c41aef023dbf56a8cb47755c5296f..1a1915e44f6bace1971cdf73c0b5b895a821af4e 100644 (file)
@@ -103,13 +103,10 @@ static int ring_doorbell_nop(struct intel_guc_client *client)
 /*
  * Basic client sanity check, handy to validate create_clients.
  */
-static int validate_client(struct intel_guc_client *client,
-                          int client_priority,
-                          bool is_preempt_client)
+static int validate_client(struct intel_guc_client *client, int client_priority)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
-       struct i915_gem_context *ctx_owner = is_preempt_client ?
-                       dev_priv->preempt_context : dev_priv->kernel_context;
+       struct i915_gem_context *ctx_owner = dev_priv->kernel_context;
 
        if (client->owner != ctx_owner ||
            client->engines != INTEL_INFO(dev_priv)->engine_mask ||
@@ -163,7 +160,7 @@ static int igt_guc_clients(void *args)
         */
        guc_clients_disable(guc);
        guc_clients_destroy(guc);
-       if (guc->execbuf_client || guc->preempt_client) {
+       if (guc->execbuf_client) {
                pr_err("guc_clients_destroy lied!\n");
                err = -EINVAL;
                goto unlock;
@@ -177,24 +174,14 @@ static int igt_guc_clients(void *args)
        GEM_BUG_ON(!guc->execbuf_client);
 
        err = validate_client(guc->execbuf_client,
-                             GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
+                             GUC_CLIENT_PRIORITY_KMD_NORMAL);
        if (err) {
                pr_err("execbug client validation failed\n");
                goto out;
        }
 
-       if (guc->preempt_client) {
-               err = validate_client(guc->preempt_client,
-                                     GUC_CLIENT_PRIORITY_KMD_HIGH, true);
-               if (err) {
-                       pr_err("preempt client validation failed\n");
-                       goto out;
-               }
-       }
-
-       /* each client should now have reserved a doorbell */
-       if (!has_doorbell(guc->execbuf_client) ||
-           (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
+       /* the client should now have reserved a doorbell */
+       if (!has_doorbell(guc->execbuf_client)) {
                pr_err("guc_clients_create didn't reserve doorbells\n");
                err = -EINVAL;
                goto out;
@@ -204,8 +191,7 @@ static int igt_guc_clients(void *args)
        guc_clients_enable(guc);
 
        /* each client should now have received a doorbell */
-       if (!client_doorbell_in_sync(guc->execbuf_client) ||
-           !client_doorbell_in_sync(guc->preempt_client)) {
+       if (!client_doorbell_in_sync(guc->execbuf_client)) {
                pr_err("failed to initialize the doorbells\n");
                err = -EINVAL;
                goto out;
@@ -300,8 +286,7 @@ static int igt_guc_doorbells(void *arg)
                        goto out;
                }
 
-               err = validate_client(clients[i],
-                                     i % GUC_CLIENT_PRIORITY_NUM, false);
+               err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
                if (err) {
                        pr_err("[%d] client_alloc sanity check failed!\n", i);
                        err = -EINVAL;