int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
/*
 * GPU MC structures, functions & helpers
 */

@@ ... @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
	}
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
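+		/* After a full reset the VRAM contents, including the GART
+		 * table, may have been lost, so rebuild the GTT mappings
+		 * before running the IB ring tests.
+		 */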
+		if (need_full_reset && amdgpu_need_backup(adev)) {
+			r = amdgpu_ttm_recover_gart(adev);
+			if (r)
+				DRM_ERROR("gart recovery failed!!!\n");
+		}
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			return 0;
		}
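
@@ ... @@
+/**
+ * amdgpu_ttm_recover_gart - rebind all tracked GTT objects after a GPU reset
+ * @adev: amdgpu device pointer
+ *
+ * Walks adev->gtt_list and re-creates the GART mapping for every bound
+ * ttm_tt.  Meant for the full-reset path, where the contents of the GART
+ * table can no longer be trusted.
+ */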
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+{
+	struct amdgpu_ttm_tt *gtt, *tmp;
+	struct ttm_mem_reg bo_mem;
+	uint32_t flags;
+	int r;
+
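+	/* bo_mem is only partially initialized: just the memory type is
+	 * needed to recompute the GTT PTE flags below.
+	 */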
+	bo_mem.mem_type = TTM_PL_TT;
+	spin_lock(&adev->gtt_list_lock);
+	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
+				     flags);
+		if (r) {
+			spin_unlock(&adev->gtt_list_lock);
+			DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+				  gtt->ttm.ttm.num_pages, (unsigned)gtt->offset);
+			return r;
+		}
+	}
+	spin_unlock(&adev->gtt_list_lock);
+	return 0;
+}
+
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;