drm/nouveau/mmu/nv41: implement new vmm backend
author: Ben Skeggs <bskeggs@redhat.com>
Tue, 31 Oct 2017 17:56:19 +0000 (03:56 +1000)
committer: Ben Skeggs <bskeggs@redhat.com>
Thu, 2 Nov 2017 03:32:27 +0000 (13:32 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c

index cb037f4a1780b69464e4934f4158a416ebe3f40d..457cf509591b7bcdcf0917d8b81551635f552380 100644 (file)
 #include "vmm.h"
 
 #include <core/option.h>
-#include <subdev/timer.h>
 
 #include <nvif/class.h>
 
 #define NV41_GART_SIZE (512 * 1024 * 1024)
-#define NV41_GART_PAGE (  4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
-              struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
-       pte = pte * 4;
-       nvkm_kmap(pgt);
-       while (cnt) {
-               u32 page = PAGE_SIZE / NV41_GART_PAGE;
-               u64 phys = (u64)*list++;
-               while (cnt && page--) {
-                       nvkm_wo32(pgt, pte, (phys >> 7) | 1);
-                       phys += NV41_GART_PAGE;
-                       pte += 4;
-                       cnt -= 1;
-               }
-       }
-       nvkm_done(pgt);
-}
-
-static void
-nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
-       pte = pte * 4;
-       nvkm_kmap(pgt);
-       while (cnt--) {
-               nvkm_wo32(pgt, pte, 0x00000000);
-               pte += 4;
-       }
-       nvkm_done(pgt);
-}
-
-static void
-nv41_vm_flush(struct nvkm_vm *vm)
-{
-       struct nvkm_subdev *subdev = &vm->mmu->subdev;
-       struct nvkm_device *device = subdev->device;
-
-       mutex_lock(&subdev->mutex);
-       nvkm_wr32(device, 0x100810, 0x00000022);
-       nvkm_msec(device, 2000,
-               if (nvkm_rd32(device, 0x100810) & 0x00000020)
-                       break;
-       );
-       nvkm_wr32(device, 0x100810, 0x00000000);
-       mutex_unlock(&subdev->mutex);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv41_mmu_oneinit(struct nvkm_mmu *mmu)
-{
-       mmu->vmm->pgt[0].mem[0] = mmu->vmm->pd->pt[0]->memory;
-       mmu->vmm->pgt[0].refcount[0] = 1;
-       return 0;
-}
 
 static void
 nv41_mmu_init(struct nvkm_mmu *mmu)
@@ -105,16 +40,12 @@ nv41_mmu_init(struct nvkm_mmu *mmu)
 
 static const struct nvkm_mmu_func
 nv41_mmu = {
-       .oneinit = nv41_mmu_oneinit,
        .init = nv41_mmu_init,
        .limit = NV41_GART_SIZE,
        .dma_bits = 39,
        .pgt_bits = 32 - 12,
        .spg_shift = 12,
        .lpg_shift = 12,
-       .map_sg = nv41_vm_map_sg,
-       .unmap = nv41_vm_unmap,
-       .flush = nv41_vm_flush,
+       /* The legacy map_sg/unmap/flush hooks (and the oneinit that wired up
+        * the shared page table) are removed above; their functionality now
+        * lives in the vmm backend registered below (see vmmnv41.c). */
        .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
 };
 
index 8a624d7c363ae9025b616494c8bb052bc8cf1acd..b595f130e57388a649ae8aea7945f4bbf1ddd33b 100644 (file)
  */
 #include "vmm.h"
 
+#include <subdev/timer.h>
+
+/* Write 'ptes' consecutive page-table entries starting at index 'ptei',
+ * mapping physically-contiguous memory beginning at 'addr'.
+ *
+ * NV41 PTE layout (as used here): physical address >> 7 in the upper
+ * bits, bit 0 set as the VALID flag.  Each 4KiB page therefore advances
+ * the encoded value by 0x1000 >> 7 == 0x20.  Entries are 4 bytes, hence
+ * the 'ptei * 4' byte offset.
+ */
+static void
+nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+       u32 data = (addr >> 7) | 0x00000001; /* VALID. */
+       while (ptes--) {
+               VMM_WO032(pt, vmm, ptei++ * 4, data);
+               data += 0x00000020;
+       }
+}
+
+/* Map a scatter/gather list into the GART.  VMM_MAP_ITER_SGL iterates
+ * the segments described by 'map' and invokes nv41_vmm_pgt_pte for each
+ * physically-contiguous run (macro defined in vmm.h, not visible here). */
+static void
+nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+       VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+}
+
+
+/* Map a DMA-address array into the GART.
+ *
+ * Fast path: when the CPU page size equals the GPU's 4KiB page
+ * (PAGE_SHIFT == 12), each entry of map->dma maps to exactly one PTE,
+ * so write them directly under a single kmap of the page table.
+ * Otherwise fall back to the generic iterator, which lets
+ * nv41_vmm_pgt_pte split each CPU page into multiple GPU pages. */
+static void
+nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+                u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+       nvkm_kmap(pt->memory);
+       while (ptes--) {
+               /* Same encoding as nv41_vmm_pgt_pte: addr >> 7, bit 0 VALID. */
+               const u32 data = (*map->dma++ >> 7) | 0x00000001;
+               VMM_WO032(pt, vmm, ptei++ * 4, data);
+       }
+       nvkm_done(pt->memory);
+#else
+       VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+#endif
+}
+
+/* Invalidate 'ptes' entries starting at 'ptei' by filling them with 0
+ * (VALID bit clear). */
+static void
+nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+                  struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+       VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
+}
+
+
 static const struct nvkm_vmm_desc_func
 nv41_vmm_desc_pgt = {
+       /* Page-table operations for the new vmm backend; these replace the
+        * map_sg/unmap hooks previously supplied by nv41.c. */
+       .unmap = nv41_vmm_pgt_unmap,
+       .dma = nv41_vmm_pgt_dma,
+       .sgl = nv41_vmm_pgt_sgl,
 };
 
 static const struct nvkm_vmm_desc
@@ -31,8 +77,26 @@ nv41_vmm_desc_12[] = {
        {}
 };
 
+/* Flush the GART TLB after page-table updates; 'level' is unused on NV41.
+ *
+ * Serialised with the subdev mutex.  Writes 0x22 to register 0x100810
+ * (presumably the flush trigger -- register semantics are not documented
+ * here), polls for up to 2ms until bit 0x20 reads back to signal
+ * completion, then writes 0 to return the register to idle.  Note a
+ * poll timeout is not treated as an error. */
+static void
+nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+       struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+       struct nvkm_device *device = subdev->device;
+
+       mutex_lock(&subdev->mutex);
+       nvkm_wr32(device, 0x100810, 0x00000022);
+       nvkm_msec(device, 2000,
+               if (nvkm_rd32(device, 0x100810) & 0x00000020)
+                       break;
+       );
+       nvkm_wr32(device, 0x100810, 0x00000000);
+       mutex_unlock(&subdev->mutex);
+}
+
 static const struct nvkm_vmm_func
 nv41_vmm = {
+       .valid = nv04_vmm_valid,
+       .flush = nv41_vmm_flush,
        .page = {
                { 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
                {}