struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_gpuobj *chan;
+ struct nvkm_memory *chan;
struct gf100_grctx info;
int ret, i;
+ u64 addr;
/* allocate memory for a "channel", which we'll use to generate
* the default context values
*/
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 0x80000 + gr->size,
- 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
+ 0x1000, true, &chan);
if (ret) {
nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
return ret;
}
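+ /* cache the backing address up front; nvkm_memory exposes it through
+ * the nvkm_memory_addr() accessor rather than an ->addr member
+ */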
+ addr = nvkm_memory_addr(chan);
+
/* PGD pointer */
nvkm_kmap(chan);
- nvkm_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
- nvkm_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
+ nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
+ nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
nvkm_wo32(chan, 0x0208, 0xffffffff);
nvkm_wo32(chan, 0x020c, 0x000000ff);
/* PGT[0] pointer */
nvkm_wo32(chan, 0x1000, 0x00000000);
- nvkm_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+ nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
/* identity-map the whole "channel" into its own vm */
- for (i = 0; i < chan->size / 4096; i++) {
- u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
- nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
- nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+ for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
+ u64 pte = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
+ nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(pte));
+ nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(pte));
}
nvkm_wo32(chan, 0x0214, 0x00000000);
nvkm_done(chan);
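/* bind the channel's page directory (placed at +0x1000 above) to the
* MMU and poll 0x100c80 until the update takes effect
*/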
- nvkm_wr32(device, 0x100cb8, (chan->addr + 0x1000) >> 8);
+ nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
nvkm_wr32(device, 0x100cbc, 0x80000001);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x100c80) & 0x00008000)
break;
);
/* make channel current */
if (gr->firmware) {
nvkm_wr32(device, 0x409840, 0x00000030);
- nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
nvkm_wr32(device, 0x409504, 0x00000003);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000010)
break;
);
nvkm_done(chan);
} else {
nvkm_wr32(device, 0x409840, 0x80000000);
- nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
nvkm_wr32(device, 0x409504, 0x00000001);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x80000000)
break;
);
}
done:
- nvkm_gpuobj_ref(NULL, &chan);
+ nvkm_memory_del(&chan);
return ret;
}
struct gf100_gr_data *data = gr->mmio_data;
struct gf100_gr_mmio *mmio = gr->mmio_list;
struct gf100_gr_chan *chan;
+ struct nvkm_device *device = gr->base.engine.subdev.device;
struct nvkm_gpuobj *image;
int ret, i;
/* allocate memory for a "mmio list" buffer that's used by the HUB
* fuc to modify some per-context register settings on first load
* of the context.
*/
- ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
- &chan->mmio);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
+ false, &chan->mmio);
if (ret)
return ret;
- ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
- NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
- &chan->mmio_vma);
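+ /* with nvkm_memory, reserving virtual address space (nvkm_vm_get) and
+ * mapping the backing memory into it (nvkm_memory_map) are separate steps
+ */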
+ ret = nvkm_vm_get(vm, 0x1000, 12, NV_MEM_ACCESS_RW |
+ NV_MEM_ACCESS_SYS, &chan->mmio_vma);
if (ret)
return ret;
+ nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
- ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size,
- data->align, 0, &chan->data[i].mem);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ data->size, data->align, false,
+ &chan->data[i].mem);
if (ret)
return ret;
- ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
- &chan->data[i].vma);
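+ /* same reserve-then-map pattern for each buffer on the mmio list */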
+ ret = nvkm_vm_get(vm, nvkm_memory_size(chan->data[i].mem),
+ 12, data->access, &chan->data[i].vma);
if (ret)
return ret;
+ nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
data++;
}
int i;
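/* tear down the per-channel buffers; vma.node is only set once
* nvkm_vm_get succeeds, so it doubles as a guard when destroying a
* partially-constructed channel
*/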
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
- nvkm_gpuobj_unmap(&chan->data[i].vma);
- nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
+ if (chan->data[i].vma.node) {
+ nvkm_vm_unmap(&chan->data[i].vma);
+ nvkm_vm_put(&chan->data[i].vma);
+ }
+ nvkm_memory_del(&chan->data[i].mem);
}
- nvkm_gpuobj_unmap(&chan->mmio_vma);
- nvkm_gpuobj_ref(NULL, &chan->mmio);
+ if (chan->mmio_vma.node) {
+ nvkm_vm_unmap(&chan->mmio_vma);
+ nvkm_vm_put(&chan->mmio_vma);
+ }
+ nvkm_memory_del(&chan->mmio);
nvkm_gr_context_destroy(&chan->base);
}
nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), gr->unk4188b4->addr >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), gr->unk4188b8->addr >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
gf100_gr_mmio(gr, oclass->mmio);
gf100_gr_dtor_fw(&gr->fuc41ac);
gf100_gr_dtor_fw(&gr->fuc41ad);
- nvkm_gpuobj_ref(NULL, &gr->unk4188b8);
- nvkm_gpuobj_ref(NULL, &gr->unk4188b4);
+ nvkm_memory_del(&gr->unk4188b8);
+ nvkm_memory_del(&gr->unk4188b4);
nvkm_gr_destroy(&gr->base);
}
gr->firmware = true;
}
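/* two page-sized scratch buffers; init programs their addresses into
* 0x4188b4/0x4188b8 and the MMU debug registers 0x100cc8/0x100ccc (the
* "unk" names reflect that their exact purpose is undocumented)
*/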
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 0x1000, 256, 0,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
&gr->unk4188b4);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 0x1000, 256, 0,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
&gr->unk4188b8);
if (ret)
return ret;
u8 ppc_nr[GPC_MAX];
u8 ppc_tpc_nr[GPC_MAX][4];
- struct nvkm_gpuobj *unk4188b4;
- struct nvkm_gpuobj *unk4188b8;
+ struct nvkm_memory *unk4188b4;
+ struct nvkm_memory *unk4188b8;
struct gf100_gr_data mmio_data[4];
struct gf100_gr_mmio mmio_list[4096/8];
struct gf100_gr_chan {
struct nvkm_gr_chan base;
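/* mmio patch list buffer and the data buffers it references, all
* mapped into the channel's vm
*/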
- struct nvkm_gpuobj *mmio;
+ struct nvkm_memory *mmio;
struct nvkm_vma mmio_vma;
int mmio_nr;
struct {
- struct nvkm_gpuobj *mem;
+ struct nvkm_memory *mem;
struct nvkm_vma vma;
} data[4];
};
nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), gr->unk4188b4->addr >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), gr->unk4188b8->addr >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
gf100_gr_mmio(gr, oclass->mmio);
return ret;
/* MMU debug buffer */
- nvkm_wr32(device, 0x100cc8, gr->unk4188b4->addr >> 8);
- nvkm_wr32(device, 0x100ccc, gr->unk4188b8->addr >> 8);
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
if (oclass->init_gpc_mmu)
oclass->init_gpc_mmu(gr);
nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), gr->unk4188b4->addr >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), gr->unk4188b8->addr >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
gf100_gr_mmio(gr, oclass->mmio);
nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
nvkm_wr32(device, 0x418890, 0x00000000);
nvkm_wr32(device, 0x418894, 0x00000000);
- nvkm_wr32(device, 0x4188b4, gr->unk4188b4->addr >> 8);
- nvkm_wr32(device, 0x4188b8, gr->unk4188b8->addr >> 8);
+ nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
/*XXX: belongs in fb */
- nvkm_wr32(device, 0x100cc8, gr->unk4188b4->addr >> 8);
- nvkm_wr32(device, 0x100ccc, gr->unk4188b8->addr >> 8);
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
gf100_gr_mmio(gr, oclass->mmio);
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = (void *)parent;
struct nv20_gr *gr;
int ret;
if (ret)
return ret;
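/* channel context table: 32 four-byte entries, indexed by channel id */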
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &gr->ctxtab);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
+ &gr->ctxtab);
if (ret)
return ret;
nv20_gr_dtor(struct nvkm_object *object)
{
struct nv20_gr *gr = (void *)object;
- nvkm_gpuobj_ref(NULL, &gr->ctxtab);
+ nvkm_memory_del(&gr->ctxtab);
nvkm_gr_destroy(&gr->base);
}
if (ret)
return ret;
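/* the table address is stored in 16-byte units, hence the >> 4 */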
- nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE, gr->ctxtab->addr >> 4);
+ nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ nvkm_memory_addr(gr->ctxtab) >> 4);
if (nv_device(gr)->chipset == 0x20) {
nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
struct nv20_gr {
struct nvkm_gr base;
- struct nvkm_gpuobj *ctxtab;
+ struct nvkm_memory *ctxtab;
};
struct nv20_gr_chan {
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = (void *)parent;
struct nv20_gr *gr;
int ret;
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &gr->ctxtab);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
+ &gr->ctxtab);
if (ret)
return ret;
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = (void *)parent;
struct nv20_gr *gr;
int ret;
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &gr->ctxtab);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
+ &gr->ctxtab);
if (ret)
return ret;
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = (void *)parent;
struct nv20_gr *gr;
int ret;
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &gr->ctxtab);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
+ &gr->ctxtab);
if (ret)
return ret;
if (ret)
return ret;
- nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE, gr->ctxtab->addr >> 4);
+ nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ nvkm_memory_addr(gr->ctxtab) >> 4);
nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = (void *)parent;
struct nv20_gr *gr;
int ret;
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &gr->ctxtab);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
+ &gr->ctxtab);
if (ret)
return ret;
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = (void *)parent;
struct nv20_gr *gr;
int ret;
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(gr), NULL, 32 * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &gr->ctxtab);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
+ &gr->ctxtab);
if (ret)
return ret;
void
nvkm_vm_put(struct nvkm_vma *vma)
{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
+ struct nvkm_mmu *mmu;
+ struct nvkm_vm *vm;
u32 fpde, lpde;
if (unlikely(vma->node == NULL))
return;
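+ /* vma->vm is only valid for an allocated vma, so don't dereference
+ * it until after the node check above
+ */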
+ vm = vma->vm;
+ mmu = vm->mmu;
+
fpde = (vma->node->offset >> mmu->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;