dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
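+	/* On ASICs that run kernel graphics queues (KGQs), each gfx ring
+	 * gets its own MQD (memory queue descriptor) buffer object plus a
+	 * CPU-side backup used to restore the queue after a GPU reset.
+	 */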
+ if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+ /* create MQD for each KGQ */
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
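+			/* skip rings whose MQD BO already exists (e.g. re-init after resume) */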
+ if (!ring->mqd_obj) {
+ r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ &ring->mqd_gpu_addr, &ring->mqd_ptr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
+ return r;
+ }
+
+				/* prepare MQD backup (CPU copy used to restore the queue after a GPU reset) */
+ adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+ if (!adev->gfx.me.mqd_backup[i])
+ dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+ }
+ }
+ }
+
/* create MQD for each KCQ */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
- dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+ dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
@@ ... @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
	struct amdgpu_ring *ring = NULL;
	int i;

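+	/* free the MQD BO and CPU-side backup created for each KGQ */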
+ if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ kfree(adev->gfx.me.mqd_backup[i]);
+ amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+ }
+ }
+
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}
ring = &adev->gfx.kiq.ring;
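+	/* the gfx MQD backup array has one extra slot past the per-ring
+	 * entries, mirroring the compute side's KIQ slot freed below;
+	 * kfree(NULL) is safe if it was never allocated
+	 */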
+ if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring)
+ kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}