habanalabs: rename restore to ctx_switch when appropriate
author Oded Gabbay <oded.gabbay@gmail.com>
Thu, 25 Apr 2019 17:15:42 +0000 (20:15 +0300)
committer Oded Gabbay <oded.gabbay@gmail.com>
Thu, 25 Apr 2019 17:15:42 +0000 (20:15 +0300)
This patch only renames certain variables and structure members,
and their accompanying comments.

This is done to better reflect the actions these variables and members
represent.

There is no functional change in this patch.

Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/context.c
drivers/misc/habanalabs/device.c
drivers/misc/habanalabs/habanalabs.h

index 02c48da0b64523f1181f2a87d7c24b721ce3bb0b..c4ab694b51b5818d76e9eb66f57bad044cca44dd 100644 (file)
@@ -601,7 +601,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
        void __user *chunks;
        u32 num_chunks;
        u64 cs_seq = ULONG_MAX;
-       int rc, do_restore;
+       int rc, do_ctx_switch;
        bool need_soft_reset = false;
 
        if (hl_device_disabled_or_in_reset(hdev)) {
@@ -612,9 +612,9 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
                goto out;
        }
 
-       do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
+       do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
 
-       if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+       if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
                long ret;
 
                chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
@@ -622,7 +622,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
 
                mutex_lock(&hpriv->restore_phase_mutex);
 
-               if (do_restore) {
+               if (do_ctx_switch) {
                        rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
                        if (rc) {
                                dev_err_ratelimited(hdev->dev,
@@ -678,18 +678,18 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
                        }
                }
 
-               ctx->thread_restore_wait_token = 1;
-       } else if (!ctx->thread_restore_wait_token) {
+               ctx->thread_ctx_switch_wait_token = 1;
+       } else if (!ctx->thread_ctx_switch_wait_token) {
                u32 tmp;
 
                rc = hl_poll_timeout_memory(hdev,
-                       (u64) (uintptr_t) &ctx->thread_restore_wait_token,
+                       (u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token,
                        jiffies_to_usecs(hdev->timeout_jiffies),
                        &tmp);
 
                if (rc || !tmp) {
                        dev_err(hdev->dev,
-                               "restore phase hasn't finished in time\n");
+                               "context switch phase didn't finish in time\n");
                        rc = -ETIMEDOUT;
                        goto out;
                }
index 619ace1c4ef74f5507d8bd909af7493bc2c79fa7..4804cdcf4c482767a294f734a7a702f7a1d52984 100644 (file)
@@ -106,8 +106,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
 
        ctx->cs_sequence = 1;
        spin_lock_init(&ctx->cs_lock);
-       atomic_set(&ctx->thread_restore_token, 1);
-       ctx->thread_restore_wait_token = 0;
+       atomic_set(&ctx->thread_ctx_switch_token, 1);
+       ctx->thread_ctx_switch_wait_token = 0;
 
        if (is_kernel_ctx) {
                ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
index a88f7be23c7fd0f6a194c5edfb1fdb26b68dad53..0e0b9ec71c8027119b95f2f70497db37b2bef040 100644 (file)
@@ -710,10 +710,10 @@ again:
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                hl_cq_reset(hdev, &hdev->completion_queue[i]);
 
-       /* Make sure the setup phase for the user context will run again */
+       /* Make sure the context switch phase will run again */
        if (hdev->user_ctx) {
-               atomic_set(&hdev->user_ctx->thread_restore_token, 1);
-               hdev->user_ctx->thread_restore_wait_token = 0;
+               atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
+               hdev->user_ctx->thread_ctx_switch_wait_token = 0;
        }
 
        /* Finished tear-down, starting to re-initialize */
index e8bbaf0f26c1ade7e885e91a8d8735a674ba418c..a624d1e1e1e508100e9aed1da04126ce1539f8a1 100644 (file)
@@ -615,12 +615,13 @@ struct hl_va_range {
  *                     DRAM mapping.
  * @cs_lock: spinlock to protect cs_sequence.
  * @dram_phys_mem: amount of used physical DRAM memory by this context.
- * @thread_restore_token: token to prevent multiple threads of the same context
- *                             from running the restore phase. Only one thread
- *                             should run it.
- * @thread_restore_wait_token: token to prevent the threads that didn't run
- *                             the restore phase from moving to their execution
- *                             phase before the restore phase has finished.
+ * @thread_ctx_switch_token: token to prevent multiple threads of the same
+ *                             context from running the context switch phase.
+ *                             Only a single thread should run it.
+ * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
+ *                             the context switch phase from moving to their
+ *                             execution phase before the context switch phase
+ *                             has finished.
  * @asid: context's unique address space ID in the device's MMU.
  */
 struct hl_ctx {
@@ -640,8 +641,8 @@ struct hl_ctx {
        u64                     *dram_default_hops;
        spinlock_t              cs_lock;
        atomic64_t              dram_phys_mem;
-       atomic_t                thread_restore_token;
-       u32                     thread_restore_wait_token;
+       atomic_t                thread_ctx_switch_token;
+       u32                     thread_ctx_switch_wait_token;
        u32                     asid;
 };