const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
unsigned long engm;
u64 subdevs = 0;
- u64 usermem;
+ u64 usermem, mthd;
+ u32 size;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
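/* ... channel allocation and USERD clear elided; by this point usermem holds the channel's offset into fifo->user.mem ... */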
nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+ /* Allocate fault method buffer (magics come from nvgpu). */
+ size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
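+ /* hweight32() counts the copy engines enabled in the PCE map; the surrounding constants are the nvgpu sizing magics mentioned above. */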
+ size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+ size = roundup(size, PAGE_SIZE);
+
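+ /* Back the buffer with zeroed, 4KiB-aligned instance memory owned by the channel. */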
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
+ &chan->mthd);
+ if (ret)
+ return ret;
+
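+ /* nvkm_memory_bar2() returns the buffer's address in the BAR2 aperture, or ~0ULL if it has no BAR2 mapping. */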
+ mthd = nvkm_memory_bar2(chan->mthd);
+ if (mthd == ~0ULL)
+ return -EFAULT;
+
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
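/* ... remaining GPFIFO/USERD RAMFC words elided ... */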
nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
- nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
- nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
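+ /* RAMFC 0x220/0x224 now carry the method buffer address instead of the old fixed values. */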
+ nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
+ nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
nvkm_done(chan->base.inst);
return gv100_fifo_gpfifo_engine_valid(chan, true, true);
}