drm/i915: Move intel_engine_mask_t around for use by i915_request_types.h
authorChris Wilson <chris@chris-wilson.co.uk>
Mon, 1 Apr 2019 16:26:39 +0000 (17:26 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Tue, 2 Apr 2019 14:09:08 +0000 (15:09 +0100)
We want to use intel_engine_mask_t inside i915_request.h, which means
extracting it from the general header file mess and placing it inside a
types.h. A knock on effect is that the compiler wants to warn about
type-contraction of ALL_ENGINES into intel_engine_mask_t, so prepare
for the worst.

v2: Use intel_engine_mask_t consistently
v3: Move I915_NUM_ENGINES to its natural home at the end of the enum

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190401162641.10963-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
29 files changed:
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/execlist.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_reset.c
drivers/gpu/drm/i915/i915_reset.h
drivers/gpu/drm/i915/i915_scheduler.h
drivers/gpu/drm/i915/i915_scheduler_types.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_timeline.h
drivers/gpu/drm/i915/i915_timeline_types.h
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_engine_types.h
drivers/gpu/drm/i915/intel_guc_submission.h
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/test_i915_scheduler_types_standalone.c [new file with mode: 0644]

index 60de05f3fa6084626a3668ebdb82de9afb722e29..1f3e8b145fc0a1227921a9dd96a618a3a15c34b1 100644 (file)
@@ -61,6 +61,7 @@ i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 i915-$(CONFIG_DRM_I915_WERROR) += \
        test_i915_active_types_standalone.o \
        test_i915_gem_context_types_standalone.o \
+       test_i915_scheduler_types_standalone.o \
        test_i915_timeline_types_standalone.o \
        test_intel_context_types_standalone.o \
        test_intel_engine_types_standalone.o \
index 1a93472cb34eff6170643a1aa76cc33943661b5f..f21b8fb5b37e11a4cfb071d3adff9fe19245422f 100644 (file)
@@ -526,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+                          intel_engine_mask_t engine_mask)
 {
-       unsigned int tmp;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
        struct intel_vgpu_submission *s = &vgpu->submission;
+       intel_engine_mask_t tmp;
 
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                kfree(s->ring_scan_buffer[engine->id]);
@@ -541,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 }
 
 static void reset_execlist(struct intel_vgpu *vgpu,
-               unsigned long engine_mask)
+                          intel_engine_mask_t engine_mask)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
 
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                init_vgpu_execlist(vgpu, engine->id);
 }
 
 static int init_execlist(struct intel_vgpu *vgpu,
-                        unsigned long engine_mask)
+                        intel_engine_mask_t engine_mask)
 {
        reset_execlist(vgpu, engine_mask);
        return 0;
index 714d709829a2a11ab4a83c45c1a3e6a3ecaca976..5ccc2c69584838ecb93960857f1c851589b13c87 100644 (file)
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
 
 void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
-               unsigned long engine_mask);
+                              intel_engine_mask_t engine_mask);
 
 #endif /*_GVT_EXECLIST_H_*/
index 8bce09de4b822354a68c57c10caef5fc01266757..7a4e1a6387e58641378b9a7a334bdc190df4d58f 100644 (file)
@@ -144,9 +144,9 @@ enum {
 
 struct intel_vgpu_submission_ops {
        const char *name;
-       int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-       void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-       void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+       int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+       void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+       void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 };
 
 struct intel_vgpu_submission {
@@ -488,7 +488,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-                                unsigned int engine_mask);
+                                intel_engine_mask_t engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
index dbc74961792261492c9245cb37bcf079b3a575a5..86761b1def1e1c10fe47491d7a5e8c863d4fe5b0 100644 (file)
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                            void *p_data, unsigned int bytes)
 {
-       unsigned int engine_mask = 0;
+       intel_engine_mask_t engine_mask = 0;
        u32 data;
 
        write_vreg(vgpu, offset, p_data, bytes);
index 3faf2438b9bcf6ad9f05849ffb4f8bd9e316f96e..b385edbeaa30a30bd673f310d0db85b222fca9e1 100644 (file)
@@ -838,13 +838,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-                               unsigned long engine_mask)
+                               intel_engine_mask_t engine_mask)
 {
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
 
        /* free the unsubmited workloads in the queues. */
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -1137,7 +1137,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  *
  */
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-               unsigned long engine_mask)
+                                intel_engine_mask_t engine_mask)
 {
        struct intel_vgpu_submission *s = &vgpu->submission;
 
@@ -1227,7 +1227,7 @@ out_shadow_ctx:
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-                                    unsigned long engine_mask,
+                                    intel_engine_mask_t engine_mask,
                                     unsigned int interface)
 {
        struct intel_vgpu_submission *s = &vgpu->submission;
index 0635b2c4bed77a757275e51f126169cb5e781edf..90c6756f54537382d666ba0356ca3c30f915baf8 100644 (file)
@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-                                unsigned long engine_mask);
+                                intel_engine_mask_t engine_mask);
 
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-                                    unsigned long engine_mask,
+                                    intel_engine_mask_t engine_mask,
                                     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-                               unsigned long engine_mask);
+                               intel_engine_mask_t engine_mask);
 
 #endif
index 314e40121e47e0d770fc0a6039bd8bcf4947baae..44ce3c2b9ac13a586075366f15661b13559ca7d9 100644 (file)
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  * GPU engines. For FLR, engine_mask is ignored.
  */
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-                                unsigned int engine_mask)
+                                intel_engine_mask_t engine_mask)
 {
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-       unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+       intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
        gvt_dbg_core("------------------------------------------\n");
        gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
index 3aef121067e451d17b9e88d2c7664a71c1086955..4dd2d9ae320270740ff901d29e514cf64124bc8b 100644 (file)
@@ -2245,7 +2245,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
        const struct intel_guc *guc = &dev_priv->guc;
        struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
        struct intel_guc_client *client = guc->execbuf_client;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
        int index;
 
        if (!USES_GUC_SUBMISSION(dev_priv))
index 5a94c7430e625deb62788086fe6867c2794d8980..0ab4826921f7e0db7976e1305c735a95f1635152 100644 (file)
@@ -2505,7 +2505,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_GEN9_LP(dev_priv)   (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
 #define IS_GEN9_BC(dev_priv)   (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
-#define ALL_ENGINES    (~0u)
 #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
 
 #define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({               \
index 5c073fe736640bf0902874628cb3072b2a277d19..9074eb1e843f20bcfc3e80958de792ace320a2b6 100644 (file)
@@ -73,8 +73,6 @@ struct drm_i915_private;
 #define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
-#define I915_NUM_ENGINES 8
-
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
 void i915_gem_park(struct drm_i915_private *i915);
index 141da4e71e4632e36d303cb5299cd067a92b1c4e..fe7ddb1f59e111311f258aaf289dbc002bad5fa0 100644 (file)
@@ -858,9 +858,9 @@ static void cb_retire(struct i915_active *base)
        kfree(cb);
 }
 
-I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
+I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
-                               unsigned long engines,
+                               intel_engine_mask_t engines,
                                int (*emit)(struct i915_request *rq, void *data),
                                void (*task)(void *data),
                                void *data)
@@ -922,7 +922,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 }
 
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
-                                     unsigned long mask)
+                                     intel_engine_mask_t mask)
 {
        struct intel_engine_cs *engine;
 
index edc6ba3f028885e5f123c6968a45533d5143ff0b..23dcb01bfd82f50b3f02992b0cd6001e7ecca019 100644 (file)
@@ -142,7 +142,7 @@ void i915_gem_context_close(struct drm_file *file);
 
 int i915_switch_context(struct i915_request *rq);
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
-                                     unsigned long engine_mask);
+                                     intel_engine_mask_t engine_mask);
 
 void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *
index 83ded9fc761aac5f9b266f81fef011d783bf3132..f597f35b109be26f15dee6cae9c1ae97f9db725b 100644 (file)
@@ -390,7 +390,7 @@ struct i915_hw_ppgtt {
        struct i915_address_space vm;
        struct kref ref;
 
-       unsigned long pd_dirty_engines;
+       intel_engine_mask_t pd_dirty_engines;
        union {
                struct i915_pml4 pml4;          /* GEN8+ & 48b PPGTT */
                struct i915_page_directory_pointer pdp; /* GEN8+ */
index 81a27b80827367f67b823e2277f61cf4e212283b..c65d45bc63ee6b9334fdc4fd0a1bef5065aea4e4 100644 (file)
@@ -1096,7 +1096,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
  * It's only a small step better than a random number in its current form.
  */
 static u32 i915_error_generate_code(struct i915_gpu_state *error,
-                                   unsigned long engine_mask)
+                                   intel_engine_mask_t engine_mask)
 {
        /*
         * IPEHR would be an ideal way to detect errors, as it's the gross
@@ -1641,7 +1641,8 @@ static void capture_reg_state(struct i915_gpu_state *error)
 }
 
 static const char *
-error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
+error_msg(struct i915_gpu_state *error,
+         intel_engine_mask_t engines, const char *msg)
 {
        int len;
        int i;
@@ -1651,7 +1652,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
                        engines &= ~BIT(i);
 
        len = scnprintf(error->error_msg, sizeof(error->error_msg),
-                       "GPU HANG: ecode %d:%lx:0x%08x",
+                       "GPU HANG: ecode %d:%x:0x%08x",
                        INTEL_GEN(error->i915), engines,
                        i915_error_generate_code(error, engines));
        if (engines) {
@@ -1790,7 +1791,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
  * to pick up.
  */
 void i915_capture_error_state(struct drm_i915_private *i915,
-                             unsigned long engine_mask,
+                             intel_engine_mask_t engine_mask,
                              const char *msg)
 {
        static bool warned;
index 302a14240b456fd42363dcc6beb80c97b591f455..5dc761e85d9dfce9117df1feedcd96c93f1deb57 100644 (file)
@@ -263,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 
 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
 void i915_capture_error_state(struct drm_i915_private *dev_priv,
-                             unsigned long engine_mask,
+                             intel_engine_mask_t engine_mask,
                              const char *error_msg);
 
 static inline struct i915_gpu_state *
index 2f25ed702ba0455beff99c132e675783d9980170..ddc403ee885519550667402941b733850455b9d3 100644 (file)
@@ -144,15 +144,15 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 }
 
 static void i915_stop_engines(struct drm_i915_private *i915,
-                             unsigned int engine_mask)
+                             intel_engine_mask_t engine_mask)
 {
        struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       intel_engine_mask_t tmp;
 
        if (INTEL_GEN(i915) < 3)
                return;
 
-       for_each_engine_masked(engine, i915, engine_mask, id)
+       for_each_engine_masked(engine, i915, engine_mask, tmp)
                gen3_stop_engine(engine);
 }
 
@@ -165,7 +165,7 @@ static bool i915_in_reset(struct pci_dev *pdev)
 }
 
 static int i915_do_reset(struct drm_i915_private *i915,
-                        unsigned int engine_mask,
+                        intel_engine_mask_t engine_mask,
                         unsigned int retry)
 {
        struct pci_dev *pdev = i915->drm.pdev;
@@ -194,7 +194,7 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
 }
 
 static int g33_do_reset(struct drm_i915_private *i915,
-                       unsigned int engine_mask,
+                       intel_engine_mask_t engine_mask,
                        unsigned int retry)
 {
        struct pci_dev *pdev = i915->drm.pdev;
@@ -204,7 +204,7 @@ static int g33_do_reset(struct drm_i915_private *i915,
 }
 
 static int g4x_do_reset(struct drm_i915_private *dev_priv,
-                       unsigned int engine_mask,
+                       intel_engine_mask_t engine_mask,
                        unsigned int retry)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -242,7 +242,7 @@ out:
 }
 
 static int ironlake_do_reset(struct drm_i915_private *dev_priv,
-                            unsigned int engine_mask,
+                            intel_engine_mask_t engine_mask,
                             unsigned int retry)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
@@ -303,7 +303,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 }
 
 static int gen6_reset_engines(struct drm_i915_private *i915,
-                             unsigned int engine_mask,
+                             intel_engine_mask_t engine_mask,
                              unsigned int retry)
 {
        struct intel_engine_cs *engine;
@@ -319,7 +319,7 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
        if (engine_mask == ALL_ENGINES) {
                hw_mask = GEN6_GRDOM_FULL;
        } else {
-               unsigned int tmp;
+               intel_engine_mask_t tmp;
 
                hw_mask = 0;
                for_each_engine_masked(engine, i915, engine_mask, tmp) {
@@ -429,7 +429,7 @@ static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
 }
 
 static int gen11_reset_engines(struct drm_i915_private *i915,
-                              unsigned int engine_mask,
+                              intel_engine_mask_t engine_mask,
                               unsigned int retry)
 {
        const u32 hw_engine_mask[] = {
@@ -443,7 +443,7 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
                [VECS1] = GEN11_GRDOM_VECS2,
        };
        struct intel_engine_cs *engine;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
        u32 hw_mask;
        int ret;
 
@@ -496,12 +496,12 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
 }
 
 static int gen8_reset_engines(struct drm_i915_private *i915,
-                             unsigned int engine_mask,
+                             intel_engine_mask_t engine_mask,
                              unsigned int retry)
 {
        struct intel_engine_cs *engine;
        const bool reset_non_ready = retry >= 1;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
        int ret;
 
        for_each_engine_masked(engine, i915, engine_mask, tmp) {
@@ -537,7 +537,7 @@ skip_reset:
 }
 
 typedef int (*reset_func)(struct drm_i915_private *,
-                         unsigned int engine_mask,
+                         intel_engine_mask_t engine_mask,
                          unsigned int retry);
 
 static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
@@ -558,7 +558,8 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
                return NULL;
 }
 
-int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+int intel_gpu_reset(struct drm_i915_private *i915,
+                   intel_engine_mask_t engine_mask)
 {
        const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
        reset_func reset;
@@ -692,7 +693,8 @@ static void gt_revoke(struct drm_i915_private *i915)
        revoke_mmaps(i915);
 }
 
-static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int gt_reset(struct drm_i915_private *i915,
+                   intel_engine_mask_t stalled_mask)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -951,7 +953,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
        return result;
 }
 
-static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int do_reset(struct drm_i915_private *i915,
+                   intel_engine_mask_t stalled_mask)
 {
        int err, i;
 
@@ -986,7 +989,7 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
  *   - re-init display
  */
 void i915_reset(struct drm_i915_private *i915,
-               unsigned int stalled_mask,
+               intel_engine_mask_t stalled_mask,
                const char *reason)
 {
        struct i915_gpu_error *error = &i915->gpu_error;
@@ -1233,14 +1236,14 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
  * of a ring dump etc.).
  */
 void i915_handle_error(struct drm_i915_private *i915,
-                      u32 engine_mask,
+                      intel_engine_mask_t engine_mask,
                       unsigned long flags,
                       const char *fmt, ...)
 {
        struct i915_gpu_error *error = &i915->gpu_error;
        struct intel_engine_cs *engine;
        intel_wakeref_t wakeref;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
        char error_msg[80];
        char *msg = NULL;
 
index 16f2389f656f39a32c7581a444bb13b370a214cb..86b1ac8116ce58e7a4594e7d1366353e4c1bce94 100644 (file)
 #include <linux/types.h>
 #include <linux/srcu.h>
 
+#include "intel_engine_types.h"
+
 struct drm_i915_private;
 struct intel_engine_cs;
 struct intel_guc;
 
 __printf(4, 5)
 void i915_handle_error(struct drm_i915_private *i915,
-                      u32 engine_mask,
+                      intel_engine_mask_t engine_mask,
                       unsigned long flags,
                       const char *fmt, ...);
 #define I915_ERROR_CAPTURE BIT(0)
@@ -25,7 +27,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 void i915_clear_error_registers(struct drm_i915_private *i915);
 
 void i915_reset(struct drm_i915_private *i915,
-               unsigned int stalled_mask,
+               intel_engine_mask_t stalled_mask,
                const char *reason);
 int i915_reset_engine(struct intel_engine_cs *engine,
                      const char *reason);
@@ -41,7 +43,8 @@ int i915_terminally_wedged(struct drm_i915_private *i915);
 bool intel_has_gpu_reset(struct drm_i915_private *i915);
 bool intel_has_reset_engine(struct drm_i915_private *i915);
 
-int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+int intel_gpu_reset(struct drm_i915_private *i915,
+                   intel_engine_mask_t engine_mask);
 
 int intel_reset_guc(struct drm_i915_private *i915);
 
index 9a1d257f3d6e59e2743036678ae29ab00f16a385..07d243acf553b2bc9f018be31d8072e5929c9a2b 100644 (file)
@@ -8,92 +8,10 @@
 #define _I915_SCHEDULER_H_
 
 #include <linux/bitops.h>
+#include <linux/list.h>
 #include <linux/kernel.h>
 
-#include <uapi/drm/i915_drm.h>
-
-struct drm_i915_private;
-struct i915_request;
-struct intel_engine_cs;
-
-enum {
-       I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
-       I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
-       I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
-       I915_PRIORITY_INVALID = INT_MIN
-};
-
-#define I915_USER_PRIORITY_SHIFT 3
-#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
-
-#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
-#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-
-#define I915_PRIORITY_WAIT             ((u8)BIT(0))
-#define I915_PRIORITY_NEWCLIENT                ((u8)BIT(1))
-#define I915_PRIORITY_NOSEMAPHORE      ((u8)BIT(2))
-
-#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
-
-struct i915_sched_attr {
-       /**
-        * @priority: execution and service priority
-        *
-        * All clients are equal, but some are more equal than others!
-        *
-        * Requests from a context with a greater (more positive) value of
-        * @priority will be executed before those with a lower @priority
-        * value, forming a simple QoS.
-        *
-        * The &drm_i915_private.kernel_context is assigned the lowest priority.
-        */
-       int priority;
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- *
- * There is no active component to the "scheduler". As we know the dependency
- * DAG of each request, we are able to insert it into a sorted queue when it
- * is ready, and are able to reorder its portion of the graph to accommodate
- * dynamic priority changes.
- */
-struct i915_sched_node {
-       struct list_head signalers_list; /* those before us, we depend upon */
-       struct list_head waiters_list; /* those after us, they depend upon us */
-       struct list_head link;
-       struct i915_sched_attr attr;
-       unsigned int flags;
-#define I915_SCHED_HAS_SEMAPHORE       BIT(0)
-};
-
-struct i915_dependency {
-       struct i915_sched_node *signaler;
-       struct list_head signal_link;
-       struct list_head wait_link;
-       struct list_head dfs_link;
-       unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
-
-struct i915_priolist {
-       struct list_head requests[I915_PRIORITY_COUNT];
-       struct rb_node node;
-       unsigned long used;
-       int priority;
-};
+#include "i915_scheduler_types.h"
 
 #define priolist_for_each_request(it, plist, idx) \
        for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
new file mode 100644 (file)
index 0000000..5c94b3e
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_TYPES_H_
+#define _I915_SCHEDULER_TYPES_H_
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+#include <uapi/drm/i915_drm.h>
+
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
+enum {
+       I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+       I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+       I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+       I915_PRIORITY_INVALID = INT_MIN
+};
+
+#define I915_USER_PRIORITY_SHIFT 3
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT             ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT                ((u8)BIT(1))
+#define I915_PRIORITY_NOSEMAPHORE      ((u8)BIT(2))
+
+#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
+
+struct i915_sched_attr {
+       /**
+        * @priority: execution and service priority
+        *
+        * All clients are equal, but some are more equal than others!
+        *
+        * Requests from a context with a greater (more positive) value of
+        * @priority will be executed before those with a lower @priority
+        * value, forming a simple QoS.
+        *
+        * The &drm_i915_private.kernel_context is assigned the lowest priority.
+        */
+       int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendering into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ *
+ * There is no active component to the "scheduler". As we know the dependency
+ * DAG of each request, we are able to insert it into a sorted queue when it
+ * is ready, and are able to reorder its portion of the graph to accommodate
+ * dynamic priority changes.
+ */
+struct i915_sched_node {
+       struct list_head signalers_list; /* those before us, we depend upon */
+       struct list_head waiters_list; /* those after us, they depend upon us */
+       struct list_head link;
+       struct i915_sched_attr attr;
+       unsigned int flags;
+#define I915_SCHED_HAS_SEMAPHORE       BIT(0)
+};
+
+struct i915_dependency {
+       struct i915_sched_node *signaler;
+       struct list_head signal_link;
+       struct list_head wait_link;
+       struct list_head dfs_link;
+       unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+struct i915_priolist {
+       struct list_head requests[I915_PRIORITY_COUNT];
+       struct rb_node node;
+       unsigned long used;
+       int priority;
+};
+
+#endif /* _I915_SCHEDULER_TYPES_H_ */
index c1e47a423d85010d932ad774655dd05933533a14..4ca7f80bdf6d661b1b4b88dc59a82d9253b6c16a 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/lockdep.h>
 
+#include "i915_active.h"
 #include "i915_syncmap.h"
 #include "i915_timeline_types.h"
 
index 12ba3c573aa0b5c8d5ec774cc53fe8c9f41b698c..1f5b55d9ffb5439cd3af23e2ea05a0e09c622638 100644 (file)
@@ -9,9 +9,10 @@
 
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/types.h>
 
-#include "i915_active.h"
+#include "i915_active_types.h"
 
 struct drm_i915_private;
 struct i915_vma;
index 616e9f707877a886ad4db837a449b079fd93504f..0e579f158016979c5a88e69eab4a92f6ae068925 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <uapi/drm/i915_drm.h>
 
+#include "intel_engine_types.h"
 #include "intel_display.h"
 
 struct drm_printer;
@@ -165,8 +166,6 @@ struct sseu_dev_info {
        u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
 };
 
-typedef u8 intel_engine_mask_t;
-
 struct intel_device_info {
        u16 gen_mask;
 
index b3249bf6a65ff6a007cbfb1ee4d5a66977082b16..232e37c1f312974965885ae4f08c457003cf177c 100644 (file)
 #include <linux/list.h>
 #include <linux/types.h>
 
+#include "i915_gem.h"
+#include "i915_scheduler_types.h"
+#include "i915_selftest.h"
 #include "i915_timeline_types.h"
-#include "intel_device_info.h"
 #include "intel_workarounds_types.h"
 
 #include "i915_gem_batch_pool.h"
 
 #define I915_CMD_HASH_ORDER 9
 
+struct dma_fence;
 struct drm_i915_reg_table;
 struct i915_gem_context;
 struct i915_request;
 struct i915_sched_attr;
 struct intel_uncore;
 
+typedef u8 intel_engine_mask_t;
+#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+
 struct intel_hw_status_page {
        struct i915_vma *vma;
        u32 *addr;
@@ -105,8 +111,9 @@ enum intel_engine_id {
        VCS3,
 #define _VCS(n) (VCS0 + (n))
        VECS0,
-       VECS1
+       VECS1,
 #define _VECS(n) (VECS0 + (n))
+       I915_NUM_ENGINES
 };
 
 struct st_preempt_hang {
index 169c54568340e51da5cf9cd95c20a595fbb14f4f..aa5e6749c925e83f45e26d949502bf9bd05fbec7 100644 (file)
@@ -29,6 +29,7 @@
 
 #include "i915_gem.h"
 #include "i915_selftest.h"
+#include "intel_engine_types.h"
 
 struct drm_i915_private;
 
index 59232df11ada7d5f874183e2369f74c46466fbcb..3d51ed1428d4ef8447acdccfe10024cbef6e4481 100644 (file)
@@ -221,8 +221,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
                                   unsigned int stuck)
 {
        struct intel_engine_cs *engine;
+       intel_engine_mask_t tmp;
        char msg[80];
-       unsigned int tmp;
        int len;
 
        /* If some rings hung but others were still busy, only
index 45f73b8b4e6d13cdf9f0e7d85f61555e4ef8c657..4e1b6efc6b22e9156cec81109689e851ee6b7af1 100644 (file)
@@ -1594,10 +1594,10 @@ out_unlock:
 }
 
 static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, unsigned int engines)
+__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
 {
        struct intel_engine_cs *engine;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
 
        if (engines == ALL_ENGINES)
                return "all";
@@ -1610,10 +1610,10 @@ __engine_name(struct drm_i915_private *i915, unsigned int engines)
 
 static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
                                          struct i915_gem_context *ctx,
-                                         unsigned int engines)
+                                         intel_engine_mask_t engines)
 {
        struct intel_engine_cs *engine;
-       unsigned int tmp;
+       intel_engine_mask_t tmp;
        int pass;
 
        GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
index 76b4fa150f2e4e059bbbe9ed33ecbe18254fe699..050bd1e19e02ee772d93438d325ef14ca8b9cd25 100644 (file)
@@ -1124,7 +1124,8 @@ static int igt_reset_engines(void *arg)
        return 0;
 }
 
-static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
+static u32 fake_hangcheck(struct drm_i915_private *i915,
+                         intel_engine_mask_t mask)
 {
        u32 count = i915_reset_count(&i915->gpu_error);
 
diff --git a/drivers/gpu/drm/i915/test_i915_scheduler_types_standalone.c b/drivers/gpu/drm/i915/test_i915_scheduler_types_standalone.c
new file mode 100644 (file)
index 0000000..8afa2c3
--- /dev/null
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_scheduler_types.h"