#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
+#define nvkm_vmm nvkm_vm
#endif
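The nvkm_vmm alias added above is the transitional shim: new code can already be written against the nvkm_vmm name while the underlying type is still struct nvkm_vm. A minimal illustration (not part of the patch):

/* With the alias in os.h, both spellings name the same type during
 * the transition, so these declarations are interchangeable. */
struct nvkm_vm  *vm;	/* legacy spelling */
struct nvkm_vmm *vmm;	/* new spelling; expands to struct nvkm_vm */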
bool iomap_uncached;
};
+struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
void nvkm_bar_bar2_init(struct nvkm_device *);
void nvkm_bar_bar2_fini(struct nvkm_device *);
void nvkm_bar_flush(struct nvkm_bar *);
struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
-int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
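With umap gone from the BAR interface, callers fetch the BAR1 VMM once and reserve address space through the MMU layer directly. A minimal sketch of the replacement pattern, using the names introduced by this patch plus the pre-existing nvkm_vm_get(); example_bar1_reserve is a hypothetical helper, not part of the patch:

/* Reserve a BAR1 window of the given size with 4KiB (1 << 12) pages.
 * example_bar1_reserve is hypothetical; nvkm_bar_bar1_vmm() and
 * nvkm_vm_get() are the calls this patch switches callers to. */
static int
example_bar1_reserve(struct nvkm_device *device, u64 size,
		     struct nvkm_vma *vma)
{
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	return nvkm_vm_get(bar, size, 12, NV_MEM_ACCESS_RW, vma);
}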
reg->bus.base = device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int page_shift = 12;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
page_shift = mem->page_shift;
- ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
- &mem->bar_vma);
+ ret = nvkm_vm_get(bar, mem->size << 12, page_shift,
+ NV_MEM_ACCESS_RW, &mem->bar_vma);
if (ret)
return ret;
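Reserving the window is only half the job: the caller still backs it with the node's pages afterwards and releases it on teardown. A minimal sketch assuming the pre-existing nvkm_vm_map()/nvkm_vm_unmap()/nvkm_vm_put() MMU calls; both helpers are hypothetical:

/* Hypothetical helpers around the pre-existing MMU calls. */
static void
example_bar_window_map(struct nvkm_mem *mem)
{
	/* Back the reserved BAR1 range with the node's pages. */
	nvkm_vm_map(&mem->bar_vma, mem);
}

static void
example_bar_window_unmap(struct nvkm_mem *mem)
{
	/* Tear the window down and return the address space. */
	nvkm_vm_unmap(&mem->bar_vma);
	nvkm_vm_put(&mem->bar_vma);
}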
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
+#include <subdev/fb.h>
#include <engine/sw.h>
#include <nvif/class.h>
struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int ret;
/* Determine number of PBDMAs by checking valid enable bits. */
if (ret)
return ret;
- ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
+ ret = nvkm_vm_get(bar, nvkm_memory_size(fifo->user.mem), 12,
+ NV_MEM_ACCESS_RW, &fifo->user.bar);
if (ret)
return ret;
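Note the size argument: instead of the hard-coded 128 * 0x1000, the reservation is now derived from the allocation itself, so the two cannot drift apart. A sketch of the pairing, assuming nvkm_memory_new()'s existing (device, target, size, align, zero, pmemory) signature, with the size taken from the removed call above:

/* Allocate the channel user area, then reserve a BAR1 window of
 * exactly the same size (128 channels * 0x1000 bytes). */
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
		      0x1000, false, &fifo->user.mem);
if (ret)
	return ret;

ret = nvkm_vm_get(bar, nvkm_memory_size(fifo->user.mem), 12,
		  NV_MEM_ACCESS_RW, &fifo->user.bar);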
#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
+#include <subdev/fb.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int engn, runl, pbid, ret, i, j;
enum nvkm_devidx engidx;
u32 *map;
if (ret)
return ret;
- ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
- &fifo->user.bar);
+ ret = nvkm_vm_get(bar, nvkm_memory_size(fifo->user.mem), 12,
+ NV_MEM_ACCESS_RW, &fifo->user.bar);
if (ret)
return ret;
return NULL;
}
-int
-nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
+struct nvkm_vmm *
+nvkm_bar_bar1_vmm(struct nvkm_device *device)
{
- return bar->func->umap(bar, size, type, vma);
+ return device->bar->func->bar1.vmm(device->bar);
}
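nvkm_bar_bar1_vmm() dereferences device->bar unconditionally, which holds on the paths this patch converts since they only run once the BAR subdev exists. If a caller could run earlier than that, a defensive variant (hypothetical, not part of the patch) might look like:

/* Hypothetical NULL-tolerant variant; the patch itself assumes
 * device->bar and the bar1.vmm hook are present on converted paths. */
struct nvkm_vmm *
example_bar_bar1_vmm_safe(struct nvkm_device *device)
{
	if (device->bar && device->bar->func->bar1.vmm)
		return device->bar->func->bar1.vmm(device->bar);
	return NULL;
}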
void
.bar1.init = nv50_bar_bar1_init,
.bar1.fini = nv50_bar_bar1_fini,
.bar1.wait = nv50_bar_bar1_wait,
+ .bar1.vmm = nv50_bar_bar1_vmm,
.bar2.init = nv50_bar_bar2_init,
.bar2.fini = nv50_bar_bar2_fini,
.bar2.wait = nv50_bar_bar1_wait,
.kmap = nv50_bar_kmap,
- .umap = nv50_bar_umap,
.flush = g84_bar_flush,
};
return gf100_bar(base)->bar[0].vm;
}
-int
-gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
+struct nvkm_vmm *
+gf100_bar_bar1_vmm(struct nvkm_bar *base)
{
- struct gf100_bar *bar = gf100_bar(base);
- return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
+ return gf100_bar(base)->bar[1].vm;
}
void
.bar1.init = gf100_bar_bar1_init,
.bar1.fini = gf100_bar_bar1_fini,
.bar1.wait = gf100_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
.bar2.init = gf100_bar_bar2_init,
.bar2.fini = gf100_bar_bar2_fini,
.bar2.wait = gf100_bar_bar1_wait,
.kmap = gf100_bar_kmap,
- .umap = gf100_bar_umap,
.flush = g84_bar_flush,
};
int gf100_bar_oneinit(struct nvkm_bar *);
void gf100_bar_bar1_init(struct nvkm_bar *);
void gf100_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *gf100_bar_bar1_vmm(struct nvkm_bar *);
void gf100_bar_bar2_init(struct nvkm_bar *);
-int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
#endif
.bar1.init = gf100_bar_bar1_init,
.bar1.fini = gf100_bar_bar1_fini,
.bar1.wait = gf100_bar_bar1_wait,
- .umap = gf100_bar_umap,
+ .bar1.vmm = gf100_bar_bar1_vmm,
.flush = g84_bar_flush,
};
return nv50_bar(base)->bar2_vm;
}
-int
-nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
-{
- struct nv50_bar *bar = nv50_bar(base);
- return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
-}
-
static void
nv50_bar_flush(struct nvkm_bar *base)
{
spin_unlock_irqrestore(&bar->base.lock, flags);
}
+struct nvkm_vmm *
+nv50_bar_bar1_vmm(struct nvkm_bar *base)
+{
+ return nv50_bar(base)->bar1_vm;
+}
+
void
nv50_bar_bar1_wait(struct nvkm_bar *base)
{
.bar1.init = nv50_bar_bar1_init,
.bar1.fini = nv50_bar_bar1_fini,
.bar1.wait = nv50_bar_bar1_wait,
+ .bar1.vmm = nv50_bar_bar1_vmm,
.bar2.init = nv50_bar_bar2_init,
.bar2.fini = nv50_bar_bar2_fini,
.bar2.wait = nv50_bar_bar1_wait,
.kmap = nv50_bar_kmap,
- .umap = nv50_bar_umap,
.flush = nv50_bar_flush,
};
void nv50_bar_init(struct nvkm_bar *);
void nv50_bar_bar1_init(struct nvkm_bar *);
void nv50_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *nv50_bar_bar1_vmm(struct nvkm_bar *);
void nv50_bar_bar2_init(struct nvkm_bar *);
struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
-int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
#endif
void (*init)(struct nvkm_bar *);
void (*fini)(struct nvkm_bar *);
void (*wait)(struct nvkm_bar *);
+ struct nvkm_vmm *(*vmm)(struct nvkm_bar *);
} bar1, bar2;
struct nvkm_vm *(*kmap)(struct nvkm_bar *);
- int (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
void (*flush)(struct nvkm_bar *);
};
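Because the new hook lives in the anonymous struct shared by bar1 and bar2, a BAR2 accessor (hypothetical here; this patch only adds the BAR1 one) would mirror nvkm_bar_bar1_vmm() exactly:

/* Hypothetical BAR2 counterpart, enabled by the shared hook layout. */
struct nvkm_vmm *
example_bar_bar2_vmm(struct nvkm_device *device)
{
	return device->bar->func->bar2.vmm(device->bar);
}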