drm/amdgpu: provide the page fault queue to the VM code
author: Christian König <christian.koenig@amd.com>
Mon, 25 Mar 2019 15:13:44 +0000 (16:13 +0100)
committer: Alex Deucher <alexander.deucher@amd.com>
Wed, 3 Apr 2019 15:00:30 +0000 (10:00 -0500)
We are going to need that for recoverable page faults.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c

index beac15bca526a6ace5c73f4acb34ef10caff5b81..91baf95212a6beefbf8a23bba57c227961d007e1 100644 (file)
@@ -303,6 +303,7 @@ struct amdgpu_vm_manager {
        const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
        struct drm_sched_rq                     *vm_pte_rqs[AMDGPU_MAX_RINGS];
        unsigned                                vm_pte_num_rqs;
+       struct amdgpu_ring                      *page_fault;
 
        /* partial resident texture handling */
        spinlock_t                              prt_lock;
index 7be5228f6c6f79046728cd9031d6001b9a04048f..e858043a3a0e4beba5b13f84c574723f071eb441 100644 (file)
@@ -2291,6 +2291,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
                                &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
                }
                adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
+               adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
        } else {
                for (i = 0; i < adev->sdma.num_instances; i++) {
                        sched = &adev->sdma.instance[i].ring.sched;