drm/amdgpu: cleanup GPU recovery check a bit (v2)
author    Christian König <christian.koenig@amd.com>
          Tue, 21 Aug 2018 08:45:29 +0000 (10:45 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 27 Aug 2018 16:11:16 +0000 (11:11 -0500)
Check whether we should call the function instead of passing a force
flag into it.
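
As a rough sketch of the new calling convention (taken from the hunks
below), callers that previously passed a force flag now check first:

    /* before: the recover function decided internally based on the flag */
    amdgpu_device_gpu_recover(adev, NULL, false);

    /* after: the caller checks first, then calls unconditionally */
    if (amdgpu_device_should_recover_gpu(adev))
            amdgpu_device_gpu_recover(adev, NULL);

Call sites that previously forced the reset (force=true), such as the
debugfs hook, simply drop the flag and call amdgpu_device_gpu_recover()
directly.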

v2: rebase on KFD changes (Alex)

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c

drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 19ef7711d9441589676098c8fe7f58194ac856d4..340e40d03d54a66dbcc005b846655d54677e410f 100644
@@ -1158,8 +1158,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
 
 /* Common functions */
+bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-                             struct amdgpu_job* job, bool force);
+                             struct amdgpu_job* job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f8bbbb3a95043ebee9d5d02471b83e98fb923a0c..3dbe675b6fe1a09a8ef926b374c10dd251c18b63 100644
@@ -267,7 +267,8 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-       amdgpu_device_gpu_recover(adev, NULL, false);
+       if (amdgpu_device_should_recover_gpu(adev))
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c961e781430db2cc8515b3c18b8674eb0fc5eb99..8f431740c424223b6b2418c34b66d7cd18977836 100644
@@ -3243,32 +3243,44 @@ error:
        return r;
 }
 
+/**
+ * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
+ * a hung GPU.
+ */
+bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
+{
+       if (!amdgpu_device_ip_check_soft_reset(adev)) {
+               DRM_INFO("Timeout, but no hardware hang detected.\n");
+               return false;
+       }
+
+       if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1  &&
+                                        !amdgpu_sriov_vf(adev))) {
+               DRM_INFO("GPU recovery disabled.\n");
+               return false;
+       }
+
+       return true;
+}
+
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
  * @adev: amdgpu device pointer
  * @job: which job trigger hang
- * @force: forces reset regardless of amdgpu_gpu_recovery
  *
  * Attempt to reset the GPU if it has hung (all asics).
  * Returns 0 for success or an error on failure.
  */
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
-                             struct amdgpu_job *job, bool force)
+                             struct amdgpu_job *job)
 {
        int i, r, resched;
 
-       if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
-               DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
-               return 0;
-       }
-
-       if (!force && (amdgpu_gpu_recovery == 0 ||
-                       (amdgpu_gpu_recovery == -1  && !amdgpu_sriov_vf(adev)))) {
-               DRM_INFO("GPU recovery disabled.\n");
-               return 0;
-       }
-
        dev_info(adev->dev, "GPU reset begin!\n");
 
        mutex_lock(&adev->lock_reset);
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 7056925eb38606fcd896ea2525643261f8495575..da36731460b56c93e16f12461b80f6fb7b589e88 100644
@@ -701,7 +701,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
        struct amdgpu_device *adev = dev->dev_private;
 
        seq_printf(m, "gpu recover\n");
-       amdgpu_device_gpu_recover(adev, NULL, true);
+       amdgpu_device_gpu_recover(adev, NULL);
 
        return 0;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 1abf5b5bac9e547cda0eb93bcaaf07d407e96007..b927e87985344980c359883b6c76f219923092e0 100644
@@ -105,8 +105,8 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  reset_work);
 
-       if (!amdgpu_sriov_vf(adev))
-               amdgpu_device_gpu_recover(adev, NULL, false);
+       if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 391e2f7c03aacdfae679057204e02e10e756cb8a..265ff90f4e01a19dd2aa6a915c778255038ea7db 100644
@@ -37,7 +37,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
 
-       amdgpu_device_gpu_recover(ring->adev, job, false);
+       if (amdgpu_device_should_recover_gpu(ring->adev))
+               amdgpu_device_gpu_recover(ring->adev, job);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 078f70faedcbb1e15dbf8d737090071d10ea403a..8cbb4655896a3318471c909f9cac0116d74ac055 100644
@@ -266,8 +266,8 @@ flr_done:
        }
 
        /* Trigger recovery for world switch failure if no TDR */
-       if (amdgpu_lockup_timeout == 0)
-               amdgpu_device_gpu_recover(adev, NULL, true);
+       if (amdgpu_device_should_recover_gpu(adev))
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 9fc1c37344cebeaa468941d15552fe5e15b102e8..842567b53df56d1824b6ba9e696caa436cdfafad 100644
@@ -521,7 +521,8 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
        }
 
        /* Trigger recovery due to world switch failure */
-       amdgpu_device_gpu_recover(adev, NULL, false);
+       if (amdgpu_device_should_recover_gpu(adev))
+               amdgpu_device_gpu_recover(adev, NULL);
 }
 
 static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,