drm/nouveau/gpuobj: separate allocation from nvkm_object
author: Ben Skeggs <bskeggs@redhat.com>
Thu, 20 Aug 2015 04:54:17 +0000 (14:54 +1000)
committer: Ben Skeggs <bskeggs@redhat.com>
Fri, 28 Aug 2015 02:40:37 +0000 (12:40 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
20 files changed:
drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c

index 7613107c05fa6c4561b5fd53918c6ce0f5e84909..d171535b8eef1989a3880b3a46cc5223de03994c 100644 (file)
@@ -7,30 +7,33 @@ struct nvkm_vma;
 struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
-#define NVOBJ_FLAG_ZERO_FREE  0x00000002
 #define NVOBJ_FLAG_HEAP       0x00000004
 
 struct nvkm_gpuobj {
        struct nvkm_object object;
-       struct nvkm_memory *memory;
+       const struct nvkm_gpuobj_func *func;
        struct nvkm_gpuobj *parent;
+       struct nvkm_memory *memory;
        struct nvkm_mm_node *node;
-       struct nvkm_mm heap;
 
-       u32 flags;
        u64 addr;
        u32 size;
+       struct nvkm_mm heap;
 
-       const struct nvkm_gpuobj_func *func;
+       void __iomem *map;
 };
 
 struct nvkm_gpuobj_func {
-       void (*acquire)(struct nvkm_gpuobj *);
+       void *(*acquire)(struct nvkm_gpuobj *);
        void (*release)(struct nvkm_gpuobj *);
        u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
        void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
 };
 
+int  nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
+                    struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
+void nvkm_gpuobj_del(struct nvkm_gpuobj **);
+
 static inline struct nvkm_gpuobj *
 nv_gpuobj(void *obj)
 {
@@ -51,12 +54,9 @@ int  nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
                            u32 flags, int length, void **);
 void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
 
-int  nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-                    u32 align, u32 flags, struct nvkm_gpuobj **);
-int  nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_memory *,
-                    struct nvkm_gpuobj **);
-int  nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-                       struct nvkm_vma *);
+int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+                   struct nvkm_vma *);
 void nvkm_gpuobj_unmap(struct nvkm_vma *);
 
 static inline void
index 2c3cc61adb74becb1c2df8b7dc7f62df8664cc78..343d2d73cd943dd493a883900cfa8dcc76714279 100644 (file)
@@ -15,7 +15,7 @@ struct nvkm_dmaeng {
        struct nvkm_engine engine;
 
        /* creates a "physical" dma object from a struct nvkm_dmaobj */
-       int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+       int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_gpuobj *parent,
                    struct nvkm_gpuobj **);
 };
 
index 650e911dd70420e318ab42d74ee85baba4443538..ebcbe7bbdddf6c3fabf71df0f285a86cb71b60f6 100644 (file)
@@ -141,7 +141,7 @@ nouveau_accel_fini(struct nouveau_drm *drm)
 {
        nouveau_channel_del(&drm->channel);
        nvif_object_fini(&drm->ntfy);
-       nvkm_gpuobj_ref(NULL, &drm->notify);
+       nvkm_gpuobj_del(&drm->notify);
        nvif_object_fini(&drm->nvsw);
        nouveau_channel_del(&drm->cechan);
        nvif_object_fini(&drm->ttm.copy);
@@ -264,8 +264,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
        }
 
        if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
-               ret = nvkm_gpuobj_new(nvxx_object(&drm->device.object), NULL, 32,
-                                     0, 0, &drm->notify);
+               ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
+                                     NULL, &drm->notify);
                if (ret) {
                        NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
                        nouveau_accel_fini(drm);
index c14469c3a861e75a44dca28ae557253177aefc97..54b46037f4ba3c84ae3957072697651f00656c09 100644 (file)
 #include <subdev/bar.h>
 #include <subdev/mmu.h>
 
-static void
-nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+/* fast-path, where backend is able to provide direct pointer to memory */
+static u32
+nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
-       if (gpuobj->node) {
-               nvkm_done(gpuobj->parent);
-               return;
-       }
-       nvkm_done(gpuobj->memory);
+       return ioread32_native(gpuobj->map + offset);
 }
 
 static void
-nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-       if (gpuobj->node) {
-               nvkm_kmap(gpuobj->parent);
-               return;
-       }
-       nvkm_kmap(gpuobj->memory);
+       iowrite32_native(data, gpuobj->map + offset);
 }
 
+/* accessor functions for gpuobjs allocated directly from instmem */
 static u32
-nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
-       if (gpuobj->node)
-               return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
        return nvkm_ro32(gpuobj->memory, offset);
 }
 
 static void
-nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-       if (gpuobj->node) {
-               nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
-               return;
-       }
        nvkm_wo32(gpuobj->memory, offset, data);
 }
 
-void
-nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
+static void
+nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
 {
-       int i;
+       gpuobj->func = &nvkm_gpuobj_heap;
+       nvkm_done(gpuobj->memory);
+}
 
-       if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
-               nvkm_kmap(gpuobj);
-               for (i = 0; i < gpuobj->size; i += 4)
-                       nvkm_wo32(gpuobj, i, 0x00000000);
-               nvkm_done(gpuobj);
-       }
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_fast = {
+       .release = nvkm_gpuobj_heap_release,
+       .rd32 = nvkm_gpuobj_rd32_fast,
+       .wr32 = nvkm_gpuobj_wr32_fast,
+};
 
-       if (gpuobj->node)
-               nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_slow = {
+       .release = nvkm_gpuobj_heap_release,
+       .rd32 = nvkm_gpuobj_heap_rd32,
+       .wr32 = nvkm_gpuobj_heap_wr32,
+};
 
-       if (gpuobj->heap.block_size)
-               nvkm_mm_fini(&gpuobj->heap);
+static void *
+nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
+{
+       gpuobj->map = nvkm_kmap(gpuobj->memory);
+       if (likely(gpuobj->map))
+               gpuobj->func = &nvkm_gpuobj_heap_fast;
+       else
+               gpuobj->func = &nvkm_gpuobj_heap_slow;
+       return gpuobj->map;
+}
 
-       nvkm_memory_del(&gpuobj->memory);
-       nvkm_object_destroy(&gpuobj->object);
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap = {
+       .acquire = nvkm_gpuobj_heap_acquire,
+};
+
+/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static u32
+nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+       return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
+}
+
+static void
+nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+       nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
+}
+
+static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
+static void
+nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+{
+       gpuobj->func = &nvkm_gpuobj_func;
+       nvkm_done(gpuobj->parent);
 }
 
 static const struct nvkm_gpuobj_func
-nvkm_gpuobj_func = {
-       .acquire = nvkm_gpuobj_acquire,
+nvkm_gpuobj_fast = {
+       .release = nvkm_gpuobj_release,
+       .rd32 = nvkm_gpuobj_rd32_fast,
+       .wr32 = nvkm_gpuobj_wr32_fast,
+};
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_slow = {
        .release = nvkm_gpuobj_release,
        .rd32 = nvkm_gpuobj_rd32,
        .wr32 = nvkm_gpuobj_wr32,
 };
 
+static void *
+nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+{
+       gpuobj->map = nvkm_kmap(gpuobj->parent);
+       if (likely(gpuobj->map)) {
+               gpuobj->map  = (u8 *)gpuobj->map + gpuobj->node->offset;
+               gpuobj->func = &nvkm_gpuobj_fast;
+       } else {
+               gpuobj->func = &nvkm_gpuobj_slow;
+       }
+       return gpuobj->map;
+}
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_func = {
+       .acquire = nvkm_gpuobj_acquire,
+};
+
+static int
+nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
+                struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
+{
+       u32 offset;
+       int ret;
+
+       if (parent) {
+               if (align >= 0) {
+                       ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
+                                          max(align, 1), &gpuobj->node);
+               } else {
+                       ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
+                                          -align, &gpuobj->node);
+               }
+               if (ret)
+                       return ret;
+
+               gpuobj->parent = parent;
+               gpuobj->func = &nvkm_gpuobj_func;
+               gpuobj->addr = parent->addr + gpuobj->node->offset;
+               gpuobj->size = gpuobj->node->length;
+
+               if (zero) {
+                       nvkm_kmap(gpuobj);
+                       for (offset = 0; offset < gpuobj->size; offset += 4)
+                               nvkm_wo32(gpuobj, offset, 0x00000000);
+                       nvkm_done(gpuobj);
+               }
+       } else {
+               ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
+                                     abs(align), zero, &gpuobj->memory);
+               if (ret)
+                       return ret;
+
+               gpuobj->func = &nvkm_gpuobj_heap;
+               gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
+               gpuobj->size = nvkm_memory_size(gpuobj->memory);
+       }
+
+       return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+}
+
+void
+nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
+{
+       struct nvkm_gpuobj *gpuobj = *pgpuobj;
+       if (gpuobj) {
+               if (gpuobj->parent)
+                       nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
+               nvkm_mm_fini(&gpuobj->heap);
+               nvkm_memory_del(&gpuobj->memory);
+               kfree(*pgpuobj);
+               *pgpuobj = NULL;
+       }
+}
+
+int
+nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
+               struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
+{
+       struct nvkm_gpuobj *gpuobj;
+       int ret;
+
+       if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
+       if (ret)
+               nvkm_gpuobj_del(pgpuobj);
+       return ret;
+}
+
+void
+nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+{
+       if (gpuobj->node)
+               nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
+
+       gpuobj->heap.block_size = 1;
+       nvkm_mm_fini(&gpuobj->heap);
+
+       nvkm_memory_del(&gpuobj->memory);
+       nvkm_object_destroy(&gpuobj->object);
+}
+
 int
 nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
                    struct nvkm_oclass *oclass, u32 pclass,
@@ -103,12 +238,10 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
                    int length, void **pobject)
 {
        struct nvkm_device *device = nv_device(parent);
-       struct nvkm_memory *memory = NULL;
        struct nvkm_gpuobj *pargpu = NULL;
        struct nvkm_gpuobj *gpuobj;
-       struct nvkm_mm *heap = NULL;
-       int ret, i;
-       u64 addr;
+       const bool zero = (flags & NVOBJ_FLAG_ZERO_ALLOC);
+       int ret;
 
        *pobject = NULL;
 
@@ -122,83 +255,18 @@ nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
                if (WARN_ON(objgpu == NULL))
                        return -EINVAL;
                pargpu = nv_gpuobj(objgpu);
-
-               addr =  pargpu->addr;
-               heap = &pargpu->heap;
-       } else {
-               ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
-                                     size, align, false, &memory);
-               if (ret)
-                       return ret;
-
-               addr = nvkm_memory_addr(memory);
-               size = nvkm_memory_size(memory);
        }
 
        ret = nvkm_object_create_(parent, engine, oclass, pclass |
                                  NV_GPUOBJ_CLASS, length, pobject);
        gpuobj = *pobject;
-       if (ret) {
-               nvkm_memory_del(&memory);
-               return ret;
-       }
-
-       gpuobj->func = &nvkm_gpuobj_func;
-       gpuobj->memory = memory;
-       gpuobj->parent = pargpu;
-       gpuobj->flags = flags;
-       gpuobj->addr = addr;
-       gpuobj->size = size;
-
-       if (heap) {
-               ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
-                                  &gpuobj->node);
-               if (ret)
-                       return ret;
-
-               gpuobj->addr += gpuobj->node->offset;
-       }
-
-       if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
-               ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
-               if (ret)
-                       return ret;
-       }
-
-       if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
-               nvkm_kmap(gpuobj);
-               for (i = 0; i < gpuobj->size; i += 4)
-                       nvkm_wo32(gpuobj, i, 0x00000000);
-               nvkm_done(gpuobj);
-       }
-
-       return ret;
-}
-
-struct nvkm_gpuobj_class {
-       struct nvkm_object *pargpu;
-       u64 size;
-       u32 align;
-       u32 flags;
-};
-
-static int
-_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-                 struct nvkm_oclass *oclass, void *data, u32 size,
-                 struct nvkm_object **pobject)
-{
-       struct nvkm_gpuobj_class *args = data;
-       struct nvkm_gpuobj *object;
-       int ret;
-
-       ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
-                                args->size, args->align, args->flags,
-                                &object);
-       *pobject = nv_object(object);
        if (ret)
                return ret;
 
-       return 0;
+       ret = nvkm_gpuobj_ctor(device, size, align, zero, pargpu, gpuobj);
+       if (!(flags & NVOBJ_FLAG_HEAP))
+               gpuobj->heap.block_size = 0;
+       return ret;
 }
 
 void
@@ -233,39 +301,9 @@ _nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
        nvkm_wo32(gpuobj, addr, data);
 }
 
-static struct nvkm_oclass
-_nvkm_gpuobj_oclass = {
-       .handle = 0x00000000,
-       .ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = _nvkm_gpuobj_ctor,
-               .dtor = _nvkm_gpuobj_dtor,
-               .init = _nvkm_gpuobj_init,
-               .fini = _nvkm_gpuobj_fini,
-               .rd32 = _nvkm_gpuobj_rd32,
-               .wr32 = _nvkm_gpuobj_wr32,
-       },
-};
-
-int
-nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-               u32 size, u32 align, u32 flags,
-               struct nvkm_gpuobj **pgpuobj)
-{
-       struct nvkm_gpuobj_class args = {
-               .pargpu = pargpu,
-               .size = size,
-               .align = align,
-               .flags = flags,
-       };
-
-       return nvkm_object_old(parent, &parent->engine->subdev.object,
-                              &_nvkm_gpuobj_oclass, &args, sizeof(args),
-                              (struct nvkm_object **)pgpuobj);
-}
-
 int
-nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-                  u32 access, struct nvkm_vma *vma)
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
+               u32 access, struct nvkm_vma *vma)
 {
        struct nvkm_memory *memory = gpuobj->memory;
        int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
@@ -288,37 +326,13 @@ nvkm_gpuobj_unmap(struct nvkm_vma *vma)
  * anywhere else.
  */
 
-static void
-nvkm_gpudup_dtor(struct nvkm_object *object)
-{
-       struct nvkm_gpuobj *gpuobj = (void *)object;
-       nvkm_object_destroy(&gpuobj->object);
-}
-
-static struct nvkm_oclass
-nvkm_gpudup_oclass = {
-       .handle = NV_GPUOBJ_CLASS,
-       .ofuncs = &(struct nvkm_ofuncs) {
-               .dtor = nvkm_gpudup_dtor,
-               .init = _nvkm_object_init,
-               .fini = _nvkm_object_fini,
-       },
-};
-
 int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_memory *base,
-               struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
 {
-       struct nvkm_gpuobj *gpuobj;
-       int ret;
-
-       ret = nvkm_object_create(parent, &parent->engine->subdev.object,
-                                &nvkm_gpudup_oclass, 0, &gpuobj);
-       *pgpuobj = gpuobj;
-       if (ret)
-               return ret;
+       if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
+               return -ENOMEM;
 
-       gpuobj->addr = nvkm_memory_addr(base);
-       gpuobj->size = nvkm_memory_size(base);
+       (*pgpuobj)->addr = nvkm_memory_addr(memory);
+       (*pgpuobj)->size = nvkm_memory_size(memory);
        return 0;
 }
index f9f38ee49058d2743065865d1b757a08098f1944..7f9870789a0f3816bfbfeaf78ea62fbf2d064ab1 100644 (file)
 #include "priv.h"
 
 #include <core/client.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 
 #include <nvif/class.h>
 #include <nvif/unpack.h>
 
+struct hack {
+       struct nvkm_gpuobj object;
+       struct nvkm_gpuobj *parent;
+};
+
+static void
+dtor(struct nvkm_object *object)
+{
+       struct hack *hack = (void *)object;
+       nvkm_gpuobj_del(&hack->parent);
+       nvkm_object_destroy(&hack->object.object);
+}
+
+static struct nvkm_oclass
+hack = {
+       .handle = NV_GPUOBJ_CLASS,
+       .ofuncs = &(struct nvkm_ofuncs) {
+               .dtor = dtor,
+               .init = _nvkm_object_init,
+               .fini = _nvkm_object_fini,
+       },
+};
+
 static int
-nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
+nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_gpuobj *pargpu,
                 struct nvkm_gpuobj **pgpuobj)
 {
        const struct nvkm_dmaeng_impl *impl = (void *)
                nv_oclass(nv_object(dmaobj)->engine);
        int ret = 0;
 
-       if (nv_object(dmaobj) == parent) { /* ctor bind */
+       if (&dmaobj->base == &pargpu->object) { /* ctor bind */
+               struct nvkm_object *parent = (void *)pargpu;
+               struct hack *object;
+
                if (nv_mclass(parent->parent) == NV_DEVICE) {
                        /* delayed, or no, binding */
                        return 0;
                }
-               ret = impl->bind(dmaobj, parent, pgpuobj);
-               if (ret == 0)
+
+               pargpu = (void *)nv_pclass((void *)pargpu, NV_GPUOBJ_CLASS);
+
+               ret = nvkm_object_create(parent, NULL, &hack, NV_GPUOBJ_CLASS, &object);
+               if (ret == 0) {
                        nvkm_object_ref(NULL, &parent);
+                       *pgpuobj = &object->object;
+
+                       ret = impl->bind(dmaobj, pargpu, &object->parent);
+                       if (ret)
+                               return ret;
+
+                       object->object.node = object->parent->node;
+                       object->object.addr = object->parent->addr;
+                       object->object.size = object->parent->size;
+                       return 0;
+               }
+
                return ret;
        }
 
-       return impl->bind(dmaobj, parent, pgpuobj);
+       return impl->bind(dmaobj, pargpu, pgpuobj);
 }
 
 int
index 499a7c7e024a5664ad1a663ac08cf04c53f425da..96a604de4b632a4a765f3862607f79370eb59d9a 100644 (file)
@@ -37,25 +37,14 @@ struct gf100_dmaobj {
 };
 
 static int
-gf100_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+gf100_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
                  struct nvkm_gpuobj **pgpuobj)
 {
        struct gf100_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
+       struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
        int ret;
 
-       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-               switch (nv_mclass(parent->parent)) {
-               case GT214_DISP_CORE_CHANNEL_DMA:
-               case GT214_DISP_BASE_CHANNEL_DMA:
-               case GT214_DISP_OVERLAY_CHANNEL_DMA:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       } else
-               return 0;
-
-       ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+       ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
@@ -146,7 +135,7 @@ gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                break;
        }
 
-       return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+       return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
index a28cf56454e462e85a43174e9fc0f0ffab6039ab..017c7a2affe518fa838e79795c7be1d735a08364 100644 (file)
@@ -36,32 +36,14 @@ struct gf110_dmaobj {
 };
 
 static int
-gf110_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+gf110_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
                  struct nvkm_gpuobj **pgpuobj)
 {
        struct gf110_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
+       struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
        int ret;
 
-       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-               switch (nv_mclass(parent->parent)) {
-               case GF110_DISP_CORE_CHANNEL_DMA:
-               case GK104_DISP_CORE_CHANNEL_DMA:
-               case GK110_DISP_CORE_CHANNEL_DMA:
-               case GM107_DISP_CORE_CHANNEL_DMA:
-               case GM204_DISP_CORE_CHANNEL_DMA:
-               case GF110_DISP_BASE_CHANNEL_DMA:
-               case GK104_DISP_BASE_CHANNEL_DMA:
-               case GK110_DISP_BASE_CHANNEL_DMA:
-               case GF110_DISP_OVERLAY_CONTROL_DMA:
-               case GK104_DISP_OVERLAY_CONTROL_DMA:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       } else
-               return 0;
-
-       ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+       ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
@@ -135,7 +117,7 @@ gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                return -EINVAL;
        }
 
-       return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+       return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
index 9c68eb0e4a70fc350287c9f97f9dc7a47ba0489e..6be2dfc70cb7104686938780e3a51a5bb63ac442 100644 (file)
@@ -37,41 +37,28 @@ struct nv04_dmaobj {
 };
 
 static int
-nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+nv04_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
                 struct nvkm_gpuobj **pgpuobj)
 {
        struct nv04_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
-       struct nvkm_gpuobj *gpuobj;
+       struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
        u64 offset = dmaobj->base.start & 0xfffff000;
        u64 adjust = dmaobj->base.start & 0x00000fff;
        u32 length = dmaobj->base.limit - dmaobj->base.start;
        int ret;
 
-       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-               switch (nv_mclass(parent->parent)) {
-               case NV03_CHANNEL_DMA:
-               case NV10_CHANNEL_DMA:
-               case NV17_CHANNEL_DMA:
-               case NV40_CHANNEL_DMA:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       }
-
        if (dmaobj->clone) {
                struct nv04_mmu *mmu = nv04_mmu(dmaobj);
                struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
                if (!dmaobj->base.start)
-                       return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
+                       return nvkm_gpuobj_wrap(pgt, pgpuobj);
                nvkm_kmap(pgt);
                offset  = nvkm_ro32(pgt, 8 + (offset >> 10));
                offset &= 0xfffff000;
                nvkm_done(pgt);
        }
 
-       ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
-       *pgpuobj = gpuobj;
+       ret = nvkm_gpuobj_new(device, 16, 16, false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
@@ -134,7 +121,7 @@ nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                return -EINVAL;
        }
 
-       return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+       return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
index 3566fa9b3ba978266d74450cefc83646220848b4..f87134ec5b341454784c89c9df47aada25fe33c2 100644 (file)
@@ -37,37 +37,14 @@ struct nv50_dmaobj {
 };
 
 static int
-nv50_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_object *parent,
+nv50_dmaobj_bind(struct nvkm_dmaobj *obj, struct nvkm_gpuobj *parent,
                 struct nvkm_gpuobj **pgpuobj)
 {
        struct nv50_dmaobj *dmaobj = container_of(obj, typeof(*dmaobj), base);
+       struct nvkm_device *device = dmaobj->base.base.engine->subdev.device;
        int ret;
 
-       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-               switch (nv_mclass(parent->parent)) {
-               case NV40_CHANNEL_DMA:
-               case NV50_CHANNEL_GPFIFO:
-               case G82_CHANNEL_GPFIFO:
-               case NV50_DISP_CORE_CHANNEL_DMA:
-               case G82_DISP_CORE_CHANNEL_DMA:
-               case GT206_DISP_CORE_CHANNEL_DMA:
-               case GT200_DISP_CORE_CHANNEL_DMA:
-               case GT214_DISP_CORE_CHANNEL_DMA:
-               case NV50_DISP_BASE_CHANNEL_DMA:
-               case G82_DISP_BASE_CHANNEL_DMA:
-               case GT200_DISP_BASE_CHANNEL_DMA:
-               case GT214_DISP_BASE_CHANNEL_DMA:
-               case NV50_DISP_OVERLAY_CHANNEL_DMA:
-               case G82_DISP_OVERLAY_CHANNEL_DMA:
-               case GT200_DISP_OVERLAY_CHANNEL_DMA:
-               case GT214_DISP_OVERLAY_CHANNEL_DMA:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       }
-
-       ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+       ret = nvkm_gpuobj_new(device, 24, 32, false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | nv_mclass(dmaobj));
@@ -164,7 +141,7 @@ nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                return -EINVAL;
        }
 
-       return dmaeng->bind(&dmaobj->base, nv_object(dmaobj), (void *)pobject);
+       return dmaeng->bind(&dmaobj->base, (void *)dmaobj, (void *)pobject);
 }
 
 static struct nvkm_ofuncs
index c4c210bdbf7e9ae561d911fd9ade0123dbee8e81..614d9e697bd42ee32658bef61b4040ee0027c480 100644 (file)
@@ -22,7 +22,7 @@ int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
 struct nvkm_dmaeng_impl {
        struct nvkm_oclass base;
        struct nvkm_oclass *sclass;
-       int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
+       int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *,
                    struct nvkm_gpuobj **);
 };
 #endif
index f937d705d6687ff4f2aa38a2f0a954ade49cf2b3..1934dfb5a434a7cb33e07175c7fba2e674498666 100644 (file)
@@ -61,6 +61,7 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent,
        struct nvkm_handle *handle;
        struct nvkm_dmaobj *dmaobj;
        struct nvkm_fifo *fifo = (void *)engine;
+       struct nvkm_fifo_base *base = (void *)parent;
        struct nvkm_fifo_chan *chan;
        struct nvkm_dmaeng *dmaeng;
        struct nvkm_subdev *subdev = &fifo->engine.subdev;
@@ -91,7 +92,7 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent,
                        return -EINVAL;
                }
 
-               ret = dmaeng->bind(dmaobj, parent, &chan->pushgpu);
+               ret = dmaeng->bind(dmaobj, &base->gpuobj, &chan->pushgpu);
                if (ret)
                        return ret;
        }
@@ -131,7 +132,7 @@ nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
        fifo->channel[chan->chid] = NULL;
        spin_unlock_irqrestore(&fifo->lock, flags);
 
-       nvkm_gpuobj_ref(NULL, &chan->pushgpu);
+       nvkm_gpuobj_del(&chan->pushgpu);
        nvkm_namedb_destroy(&chan->namedb);
 }
 
index 96c6835e9f149b2ed05d1405c907f5fa6d28b819..058296b2f2852310da12fd91a253436fe167fb22 100644 (file)
@@ -140,23 +140,25 @@ g84_fifo_object_attach(struct nvkm_object *parent,
        else
                context = 0x00000004; /* just non-zero */
 
-       switch (nv_engidx(object->engine)) {
-       case NVDEV_ENGINE_DMAOBJ:
-       case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-       case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-       case NVDEV_ENGINE_MPEG  :
-       case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
-       case NVDEV_ENGINE_ME    :
-       case NVDEV_ENGINE_CE0   : context |= 0x00300000; break;
-       case NVDEV_ENGINE_VP    :
-       case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
-       case NVDEV_ENGINE_CIPHER:
-       case NVDEV_ENGINE_SEC   :
-       case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
-       case NVDEV_ENGINE_BSP   :
-       case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
-       default:
-               return -EINVAL;
+       if (object->engine) {
+               switch (nv_engidx(object->engine)) {
+               case NVDEV_ENGINE_DMAOBJ:
+               case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+               case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+               case NVDEV_ENGINE_MPEG  :
+               case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
+               case NVDEV_ENGINE_ME    :
+               case NVDEV_ENGINE_CE0   : context |= 0x00300000; break;
+               case NVDEV_ENGINE_VP    :
+               case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
+               case NVDEV_ENGINE_CIPHER:
+               case NVDEV_ENGINE_SEC   :
+               case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
+               case NVDEV_ENGINE_BSP   :
+               case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
+               default:
+                       return -EINVAL;
+               }
        }
 
        return nvkm_ramht_insert(chan->ramht, 0, handle, context);
@@ -374,6 +376,7 @@ g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                      struct nvkm_oclass *oclass, void *data, u32 size,
                      struct nvkm_object **pobject)
 {
+       struct nvkm_device *device = nv_engine(engine)->subdev.device;
        struct nv50_fifo_base *base;
        int ret;
 
@@ -383,13 +386,13 @@ g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
-                             NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+       ret = nvkm_gpuobj_new(device, 0x0200, 0, true, &base->base.gpuobj,
+                             &base->eng);
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
-                             0, &base->pgd);
+       ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
+                             &base->pgd);
        if (ret)
                return ret;
 
@@ -397,13 +400,13 @@ g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
-                             0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+       ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, &base->base.gpuobj,
+                             &base->cache);
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
-                             0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+       ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, &base->base.gpuobj,
+                             &base->ramfc);
        if (ret)
                return ret;
 
index cbb8d249e7fbde41deda58556d5907a275aea172..77b8df1f57fbab4d7f27b2ba908f25d9a3378cb8 100644 (file)
@@ -130,8 +130,8 @@ gf100_fifo_context_attach(struct nvkm_object *parent,
        }
 
        if (!ectx->vma.node) {
-               ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-                                        NV_MEM_ACCESS_RW, &ectx->vma);
+               ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
+                                     NV_MEM_ACCESS_RW, &ectx->vma);
                if (ret)
                        return ret;
 
@@ -334,6 +334,7 @@ gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                        struct nvkm_oclass *oclass, void *data, u32 size,
                        struct nvkm_object **pobject)
 {
+       struct nvkm_device *device = nv_engine(engine)->subdev.device;
        struct gf100_fifo_base *base;
        int ret;
 
@@ -344,8 +345,7 @@ gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-                             &base->pgd);
+       ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
        if (ret)
                return ret;
 
@@ -368,7 +368,7 @@ gf100_fifo_context_dtor(struct nvkm_object *object)
 {
        struct gf100_fifo_base *base = (void *)object;
        nvkm_vm_ref(NULL, &base->vm, base->pgd);
-       nvkm_gpuobj_ref(NULL, &base->pgd);
+       nvkm_gpuobj_del(&base->pgd);
        nvkm_fifo_context_destroy(&base->base);
 }
 
index 0e8356d5fa02e038d429e3268758e24be4955c88..39dae1a28dd1e1fe4fe034d181c9cb09270a4660 100644 (file)
@@ -154,8 +154,8 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        }
 
        if (!ectx->vma.node) {
-               ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-                                        NV_MEM_ACCESS_RW, &ectx->vma);
+               ret = nvkm_gpuobj_map(nv_gpuobj(ectx), base->vm,
+                                     NV_MEM_ACCESS_RW, &ectx->vma);
                if (ret)
                        return ret;
 
@@ -388,6 +388,7 @@ gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                        struct nvkm_oclass *oclass, void *data, u32 size,
                        struct nvkm_object **pobject)
 {
+       struct nvkm_device *device = nv_engine(engine)->subdev.device;
        struct gk104_fifo_base *base;
        int ret;
 
@@ -397,8 +398,7 @@ gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-                             &base->pgd);
+       ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &base->pgd);
        if (ret)
                return ret;
 
@@ -421,7 +421,7 @@ gk104_fifo_context_dtor(struct nvkm_object *object)
 {
        struct gk104_fifo_base *base = (void *)object;
        nvkm_vm_ref(NULL, &base->vm, base->pgd);
-       nvkm_gpuobj_ref(NULL, &base->pgd);
+       nvkm_gpuobj_del(&base->pgd);
        nvkm_fifo_context_destroy(&base->base);
 }
 
index f0fd1ce8f671b4120e2beee7a21bd4b0f2e04dab..4bec70730558688d4a4c3055bdffd0b8d122f6ff 100644 (file)
@@ -65,19 +65,21 @@ nv04_fifo_object_attach(struct nvkm_object *parent,
        else
                context = 0x00000004; /* just non-zero */
 
-       switch (nv_engidx(object->engine)) {
-       case NVDEV_ENGINE_DMAOBJ:
-       case NVDEV_ENGINE_SW:
-               context |= 0x00000000;
-               break;
-       case NVDEV_ENGINE_GR:
-               context |= 0x00010000;
-               break;
-       case NVDEV_ENGINE_MPEG:
-               context |= 0x00020000;
-               break;
-       default:
-               return -EINVAL;
+       if (object->engine) {
+               switch (nv_engidx(object->engine)) {
+               case NVDEV_ENGINE_DMAOBJ:
+               case NVDEV_ENGINE_SW:
+                       context |= 0x00000000;
+                       break;
+               case NVDEV_ENGINE_GR:
+                       context |= 0x00010000;
+                       break;
+               case NVDEV_ENGINE_MPEG:
+                       context |= 0x00020000;
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
 
        context |= 0x80000000; /* valid */
index eb6588ec0b6d4803a61080cbdefc5bf859abae73..a2d8da8a9341ea8b45d5405d39b1a1898e2a6841 100644 (file)
@@ -78,19 +78,21 @@ nv40_fifo_object_attach(struct nvkm_object *parent,
        else
                context = 0x00000004; /* just non-zero */
 
-       switch (nv_engidx(object->engine)) {
-       case NVDEV_ENGINE_DMAOBJ:
-       case NVDEV_ENGINE_SW:
-               context |= 0x00000000;
-               break;
-       case NVDEV_ENGINE_GR:
-               context |= 0x00100000;
-               break;
-       case NVDEV_ENGINE_MPEG:
-               context |= 0x00200000;
-               break;
-       default:
-               return -EINVAL;
+       if (object->engine) {
+               switch (nv_engidx(object->engine)) {
+               case NVDEV_ENGINE_DMAOBJ:
+               case NVDEV_ENGINE_SW:
+                       context |= 0x00000000;
+                       break;
+               case NVDEV_ENGINE_GR:
+                       context |= 0x00100000;
+                       break;
+               case NVDEV_ENGINE_MPEG:
+                       context |= 0x00200000;
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
 
        context |= chid << 23;
index 96e5b61daf10c35993a14c101b73423b0b4d3cca..620c0cfb2453b070c6525b9287203010bc94c4ce 100644 (file)
@@ -171,13 +171,15 @@ nv50_fifo_object_attach(struct nvkm_object *parent,
        else
                context = 0x00000004; /* just non-zero */
 
-       switch (nv_engidx(object->engine)) {
-       case NVDEV_ENGINE_DMAOBJ:
-       case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-       case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-       case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
-       default:
-               return -EINVAL;
+       if (object->engine) {
+               switch (nv_engidx(object->engine)) {
+               case NVDEV_ENGINE_DMAOBJ:
+               case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+               case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+               case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
+               default:
+                       return -EINVAL;
+               }
        }
 
        return nvkm_ramht_insert(chan->ramht, 0, handle, context);
@@ -402,6 +404,7 @@ nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                       struct nvkm_oclass *oclass, void *data, u32 size,
                       struct nvkm_object **pobject)
 {
+       struct nvkm_device *device = nv_engine(engine)->subdev.device;
        struct nv50_fifo_base *base;
        int ret;
 
@@ -411,17 +414,17 @@ nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
-                             0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+       ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, &base->base.gpuobj,
+                             &base->ramfc);
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
-                             NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+       ret = nvkm_gpuobj_new(device, 0x1200, 0, true, &base->base.gpuobj,
+                             &base->eng);
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
+       ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
                              &base->pgd);
        if (ret)
                return ret;
@@ -438,10 +441,10 @@ nv50_fifo_context_dtor(struct nvkm_object *object)
 {
        struct nv50_fifo_base *base = (void *)object;
        nvkm_vm_ref(NULL, &base->vm, base->pgd);
-       nvkm_gpuobj_ref(NULL, &base->pgd);
-       nvkm_gpuobj_ref(NULL, &base->eng);
-       nvkm_gpuobj_ref(NULL, &base->ramfc);
-       nvkm_gpuobj_ref(NULL, &base->cache);
+       nvkm_gpuobj_del(&base->pgd);
+       nvkm_gpuobj_del(&base->eng);
+       nvkm_gpuobj_del(&base->ramfc);
+       nvkm_gpuobj_del(&base->cache);
        nvkm_fifo_context_destroy(&base->base);
 }
 
index 8e3574631068f7d466f83ff0da6b6b586a533983..7d304ccc6a64a603b87656ecef1c4107e4070fe2 100644 (file)
@@ -75,8 +75,7 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x8000, 0, 0,
-                             &bar_vm->pgd);
+       ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
        if (ret)
                return ret;
 
@@ -157,14 +156,14 @@ gf100_bar_dtor(struct nvkm_object *object)
        struct gf100_bar *bar = (void *)object;
 
        nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
-       nvkm_gpuobj_ref(NULL, &bar->bar[1].pgd);
+       nvkm_gpuobj_del(&bar->bar[1].pgd);
        nvkm_memory_del(&bar->bar[1].mem);
 
        if (bar->bar[0].vm) {
                nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
                nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
        }
-       nvkm_gpuobj_ref(NULL, &bar->bar[0].pgd);
+       nvkm_gpuobj_del(&bar->bar[0].pgd);
        nvkm_memory_del(&bar->bar[0].mem);
 
        nvkm_bar_destroy(&bar->base);
index 8e17b62fa1daecb629ca94bd4b7fd0dd2a613871..add132b2ed7aab408217d5a1107b15dad60e7dfa 100644 (file)
@@ -99,7 +99,6 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        static struct lock_class_key bar1_lock;
        static struct lock_class_key bar3_lock;
        struct nvkm_device *device = nv_device(parent);
-       struct nvkm_object *heap;
        struct nvkm_vm *vm;
        struct nv50_bar *bar;
        u64 start, limit;
@@ -110,19 +109,17 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(bar), NULL, 0x20000, 0,
-                             NVOBJ_FLAG_HEAP, &bar->mem);
-       heap = nv_object(bar->mem);
+       ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(bar), heap,
-                             (device->chipset == 0x50) ? 0x1400 : 0x0200,
-                             0, 0, &bar->pad);
+       ret = nvkm_gpuobj_new(device, (device->chipset == 0x50) ?
+                             0x1400 : 0x200, 0, false, bar->mem,
+                             &bar->pad);
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(bar), heap, 0x4000, 0, 0, &bar->pgd);
+       ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
        if (ret)
                return ret;
 
@@ -145,7 +142,7 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar3);
+       ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
        if (ret)
                return ret;
 
@@ -174,7 +171,7 @@ nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       ret = nvkm_gpuobj_new(nv_object(bar), heap, 24, 16, 0, &bar->bar1);
+       ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
        if (ret)
                return ret;
 
@@ -203,16 +200,16 @@ static void
 nv50_bar_dtor(struct nvkm_object *object)
 {
        struct nv50_bar *bar = (void *)object;
-       nvkm_gpuobj_ref(NULL, &bar->bar1);
+       nvkm_gpuobj_del(&bar->bar1);
        nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
-       nvkm_gpuobj_ref(NULL, &bar->bar3);
+       nvkm_gpuobj_del(&bar->bar3);
        if (bar->bar3_vm) {
                nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
                nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
        }
-       nvkm_gpuobj_ref(NULL, &bar->pgd);
-       nvkm_gpuobj_ref(NULL, &bar->pad);
-       nvkm_gpuobj_ref(NULL, &bar->mem);
+       nvkm_gpuobj_del(&bar->pgd);
+       nvkm_gpuobj_del(&bar->pad);
+       nvkm_gpuobj_del(&bar->mem);
        nvkm_bar_destroy(&bar->base);
 }
 
index 9c712818528b933cf8ffc0356a290da26974c06c..6fa1bdb02dfdbd828f636e326354740594348d39 100644 (file)
@@ -420,7 +420,7 @@ nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
        if (!vpgd)
                return -ENOMEM;
 
-       nvkm_gpuobj_ref(pgd, &vpgd->obj);
+       vpgd->obj = pgd;
 
        mutex_lock(&vm->mutex);
        for (i = vm->fpde; i <= vm->lpde; i++)
@@ -434,7 +434,6 @@ static void
 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 {
        struct nvkm_vm_pgd *vpgd, *tmp;
-       struct nvkm_gpuobj *pgd = NULL;
 
        if (!mpgd)
                return;
@@ -442,15 +441,12 @@ nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
        mutex_lock(&vm->mutex);
        list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
                if (vpgd->obj == mpgd) {
-                       pgd = vpgd->obj;
                        list_del(&vpgd->head);
                        kfree(vpgd);
                        break;
                }
        }
        mutex_unlock(&vm->mutex);
-
-       nvkm_gpuobj_ref(NULL, &pgd);
 }
 
 static void