struct gk20a_instobj {
struct nvkm_memory memory;
- struct nvkm_mem mem;
+ struct nvkm_mm_node *mn;
struct gk20a_instmem *imem;
/* CPU mapping */
static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
- return gk20a_instobj(memory)->mem.offset;
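+ /* nvkm_mm_node offsets/lengths are in 4KiB GPU pages, hence the << 12 */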
+ return (u64)gk20a_instobj(memory)->mn->offset << 12;
}
static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
- return (u64)gk20a_instobj(memory)->mem.size << 12;
+ return (u64)gk20a_instobj(memory)->mn->length << 12;
}
static int
gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
struct nvkm_vma *vma, void *argv, u32 argc)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
- nvkm_vm_map_at(vma, 0, &node->mem);
- return 0;
+ struct nvkm_vmm_map map = {
+ .memory = &node->memory,
+ .offset = offset,
+ .mem = node->mn,
+ };
+
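+ /* VMAs that still carry the old ->vm pointer take the legacy nvkm_vm_map_at() path */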
+ if (vma->vm) {
+ struct nvkm_mem mem = {
+ .mem = node->mn,
+ .memory = &node->memory,
+ };
+ nvkm_vm_map_at(vma, 0, &mem);
+ return 0;
+ }
+
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static void *
if (unlikely(!node->base.vaddr))
goto out;
- dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
- node->handle, imem->attrs);
+ dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+ node->base.vaddr, node->handle, imem->attrs);
out:
return node;
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
- struct nvkm_mm_node *r = node->base.mem.mem;
+ struct nvkm_mm_node *r = node->base.mn;
int i;
if (unlikely(!r))
goto out;
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
/* Unmap pages from GPU address space and free them */
- for (i = 0; i < node->base.mem.size; i++) {
+ for (i = 0; i < node->base.mn->length; i++) {
iommu_unmap(imem->domain,
(r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
node->r.offset = node->handle >> 12;
node->r.length = (npages << PAGE_SHIFT) >> 12;
- node->base.mem.offset = node->handle;
- node->base.mem.mem = &node->r;
+ node->base.mn = &node->r;
return 0;
}
/* IOMMU bit tells that an address is to be resolved through the IOMMU */
r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
- node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
- node->base.mem.mem = r;
+ node->base.mn = r;
return 0;
release_area:
node->imem = imem;
- /* present memory for being mapped using small pages */
- node->mem.size = size >> 12;
- node->mem.memtype = 0;
- node->mem.memory = &node->memory;
-
nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
- size, align, node->mem.offset);
+ size, align, (u64)node->mn->offset << 12);
return 0;
}
struct nvkm_instobj base;
struct nv50_instmem *imem;
struct nvkm_memory *ram;
- struct nvkm_vma bar;
+ struct nvkm_vma *bar;
refcount_t maps;
void *map;
struct list_head lru;
struct nvkm_memory *memory = &iobj->base.memory;
struct nvkm_subdev *subdev = &imem->base.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_vma bar = {}, ebar;
+ struct nvkm_vma *bar = NULL, *ebar;
u64 size = nvkm_memory_size(memory);
void *emap;
int ret;
* to the possibility of recursion for page table allocation.
*/
mutex_unlock(&subdev->mutex);
- while ((ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar))) {
+ while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
/* Evict unused mappings, and keep retrying until we either
succeed, or there are no more objects left on the LRU.
*/
nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
nvkm_memory_addr(&eobj->base.memory),
nvkm_memory_size(&eobj->base.memory),
- eobj->bar.offset);
+ eobj->bar->addr);
list_del_init(&eobj->lru);
ebar = eobj->bar;
- eobj->bar.node = NULL;
+ eobj->bar = NULL;
emap = eobj->map;
eobj->map = NULL;
}
if (!eobj)
break;
iounmap(emap);
- nvkm_vm_put(&ebar);
+ nvkm_vmm_put(vmm, &ebar);
}
if (ret == 0)
- ret = nvkm_memory_map(memory, 0, vmm, &bar, NULL, 0);
+ ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
mutex_lock(&subdev->mutex);
- if (ret || iobj->bar.node) {
+ if (ret || iobj->bar) {
/* We either failed, or another thread beat us. */
mutex_unlock(&subdev->mutex);
- nvkm_vm_put(&bar);
+ nvkm_vmm_put(vmm, &bar);
mutex_lock(&subdev->mutex);
return;
}
/* Make the mapping visible to the host. */
iobj->bar = bar;
iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
- (u32)iobj->bar.offset, size);
+ (u32)iobj->bar->addr, size);
if (!iobj->map) {
nvkm_warn(subdev, "PRAMIN ioremap failed\n");
- nvkm_vm_put(&iobj->bar);
+ nvkm_vmm_put(vmm, &iobj->bar);
}
}
{
struct nv50_instobj *iobj = nv50_instobj(memory);
struct nvkm_instmem *imem = &iobj->imem->base;
- struct nvkm_vma bar;
+ struct nvkm_vma *bar;
void *map = NULL;
mutex_lock(&imem->subdev.mutex);
mutex_unlock(&imem->subdev.mutex);
if (map) {
+ struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
iounmap(map);
- nvkm_vm_put(&bar);
+ if (likely(vmm)) /* Can be NULL during BAR destructor. */
+ nvkm_vmm_put(vmm, &bar);
}
nvkm_memory_unref(&iobj->ram);
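
For reference, a minimal sketch of how the reworked BAR2 mapping sequence above hangs together, using only the nvkm_memory_size()/nvkm_vmm_get()/nvkm_memory_map()/nvkm_vmm_put() calls as they appear in this patch; the helper name example_bar2_map() and its simplified error handling are illustrative, not part of the change:

static int
example_bar2_map(struct nvkm_vmm *vmm, struct nvkm_memory *memory,
		 struct nvkm_vma **pvma)
{
	/* Illustrative only: mirrors the nv50_instobj_kmap() flow above,
	 * without the LRU eviction retry loop or locking.
	 */
	struct nvkm_vma *vma = NULL;
	u64 size = nvkm_memory_size(memory);
	int ret;

	/* Reserve a 4KiB-aligned (page shift 12) range of BAR2 address space. */
	ret = nvkm_vmm_get(vmm, 12, size, &vma);
	if (ret)
		return ret;

	/* Back the reservation with the object's pages. */
	ret = nvkm_memory_map(memory, 0, vmm, vma, NULL, 0);
	if (ret) {
		nvkm_vmm_put(vmm, &vma);
		return ret;
	}

	*pvma = vma;
	return 0;
}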