drm/nouveau/fifo/gv100: allocate method buffer
author Ben Skeggs <bskeggs@redhat.com>
Tue, 11 Dec 2018 04:50:02 +0000 (14:50 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Tue, 11 Dec 2018 05:37:49 +0000 (15:37 +1000)
The GPU saves off some state to the address specified in this part of RAMFC
(the fault method buffer) when the channel faults, so we should point it at a
valid allocation rather than the placeholder value written previously.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
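
The buffer is sized from the number of copy engines exposed by the PCE map.
Below is a minimal user-space sketch of that sizing formula, mirroring the two
lines added in gpfifogv100.c; the PCE map value (0x3), the page size, and the
popcount32()/round_up_to() helpers are illustrative stand-ins for the register
read of 0x104028, PAGE_SIZE, and the kernel's hweight32()/roundup().

	/*
	 * Standalone sketch (not kernel code) of the fault method buffer
	 * sizing used in the patch below.  The constants come from nvgpu;
	 * the PCE map value here is a hypothetical example.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define SKETCH_PAGE_SIZE 4096u

	/* Mirrors hweight32(): number of set bits in a 32-bit word. */
	static unsigned int popcount32(uint32_t v)
	{
		unsigned int n = 0;
		while (v) {
			n += v & 1;
			v >>= 1;
		}
		return n;
	}

	/* Round up to the next multiple of "to", like the kernel's roundup(). */
	static uint32_t round_up_to(uint32_t v, uint32_t to)
	{
		return ((v + to - 1) / to) * to;
	}

	int main(void)
	{
		uint32_t pce_map = 0x3; /* hypothetical: two copy engines present */
		uint32_t size;

		/* Same formula as the patch: magics come from nvgpu. */
		size = 27 * 5 * (((9 + 1 + 3) * popcount32(pce_map)) + 2);
		size = round_up_to(size, SKETCH_PAGE_SIZE);

		printf("fault method buffer size: %u bytes\n", size);
		return 0;
	}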
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c

index 8e28ba6b23074bdfb6cf790b9c18d7adf31996b3..68df966205d084eaf4eb2f12eac4fc1b0c6e8ead 100644
@@ -14,6 +14,8 @@ struct gk104_fifo_chan {
        struct list_head head;
        bool killed;
 
+       struct nvkm_memory *mthd;
+
        struct {
                struct nvkm_gpuobj *inst;
                struct nvkm_vma *vma;
index 6127e2cf5b938bca0fb79b46f58fce3b7012d17d..728a1edbf98c8cce3f7cb81ea8912b8097d293b5 100644
@@ -222,6 +222,7 @@ void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
        struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+       nvkm_memory_unref(&chan->mthd);
        kfree(chan->cgrp);
        return chan;
 }
index 65db8a1be943ac313fa17ea3c49d03f97ba0ab49..ad5d119f6a36f7da909ff3ec55db53deea732f23 100644
@@ -118,11 +118,13 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
                       const struct nvkm_oclass *oclass,
                       struct nvkm_object **pobject)
 {
+       struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct gk104_fifo_chan *chan;
        int runlist = ffs(*runlists) -1, ret, i;
        unsigned long engm;
        u64 subdevs = 0;
-       u64 usermem;
+       u64 usermem, mthd;
+       u32 size;
 
        if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
                return -EINVAL;
@@ -174,6 +176,20 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
        nvkm_done(fifo->user.mem);
        usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
 
+       /* Allocate fault method buffer (magics come from nvgpu). */
+       size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+       size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+       size = roundup(size, PAGE_SIZE);
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
+                             &chan->mthd);
+       if (ret)
+               return ret;
+
+       mthd = nvkm_memory_bar2(chan->mthd);
+       if (mthd == ~0ULL)
+               return -EFAULT;
+
        /* RAMFC */
        nvkm_kmap(chan->base.inst);
        nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -190,8 +206,8 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
        nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
        nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
        nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
-       nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
-       nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+       nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
+       nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
        nvkm_done(chan->base.inst);
        return gv100_fifo_gpfifo_engine_valid(chan, true, true);
 }
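
Design note: the RAMFC words at 0x220/0x224 appear to hold the 64-bit fault
method buffer pointer as low word then high word, which is why the old
placeholder values are replaced with lower_32_bits()/upper_32_bits() of the
buffer's BAR2 address.  The nvkm_memory_unref(&chan->mthd) added to
gk104_fifo_gpfifo_dtor() releases the buffer when the channel is torn down,
which should also cover the early -EFAULT return taken when nvkm_memory_bar2()
fails.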