drm/amdgpu: create mqd for gfx queues on navi10
authorHawking Zhang <Hawking.Zhang@amd.com>
Tue, 14 Aug 2018 12:54:35 +0000 (20:54 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 21 Jun 2019 02:24:54 +0000 (21:24 -0500)
The MQD is the memory queue descriptor used by the gfx and compute queues.

Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Jack Xiao <jack.xiao@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c

index cbbf648bc3e8e540e392bdc9b0441c5dc6a55813..61fca1e9a86b4101714fe59b63d509cbb2c524c9 100644 (file)
@@ -389,6 +389,27 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
                                dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
        }
 
+       if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+               /* create MQD for each KGQ */
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       ring = &adev->gfx.gfx_ring[i];
+                       if (!ring->mqd_obj) {
+                               r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+                                                           AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+                                                           &ring->mqd_gpu_addr, &ring->mqd_ptr);
+                               if (r) {
+                                       dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
+                                       return r;
+                               }
+
+                               /* prepare MQD backup */
+                               adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+                               if (!adev->gfx.me.mqd_backup[i])
+                                       dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+                       }
+               }
+       }
+
        /* create MQD for each KCQ */
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
@@ -397,7 +418,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
                                                    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
                                                    &ring->mqd_gpu_addr, &ring->mqd_ptr);
                        if (r) {
-                               dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+                               dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
                                return r;
                        }
 
@@ -416,6 +437,16 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
        struct amdgpu_ring *ring = NULL;
        int i;
 
+       if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       ring = &adev->gfx.gfx_ring[i];
+                       kfree(adev->gfx.me.mqd_backup[i]);
+                       amdgpu_bo_free_kernel(&ring->mqd_obj,
+                                             &ring->mqd_gpu_addr,
+                                             &ring->mqd_ptr);
+               }
+       }
+
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
                kfree(adev->gfx.mec.mqd_backup[i]);
@@ -425,6 +456,8 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
        }
 
        ring = &adev->gfx.kiq.ring;
+       if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring)
+               kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
        kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
        amdgpu_bo_free_kernel(&ring->mqd_obj,
                              &ring->mqd_gpu_addr,