{
struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
struct gk20a_instobj *node = (void *)object;
+ struct nvkm_device *device = imem->base.subdev.device;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
spin_lock_irqsave(&imem->lock, flags);
if (unlikely(imem->addr != base)) {
- nv_wr32(imem, 0x001700, base >> 16);
+ nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base;
}
- data = nv_rd32(imem, 0x700000 + addr);
+ data = nvkm_rd32(device, 0x700000 + addr);
spin_unlock_irqrestore(&imem->lock, flags);
return data;
}
{
struct gk20a_instmem *imem = (void *)nvkm_instmem(object);
struct gk20a_instobj *node = (void *)object;
+ struct nvkm_device *device = imem->base.subdev.device;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
spin_lock_irqsave(&imem->lock, flags);
if (unlikely(imem->addr != base)) {
- nv_wr32(imem, 0x001700, base >> 16);
+ nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base;
}
- nv_wr32(imem, 0x700000 + addr, data);
+ nvkm_wr32(device, 0x700000 + addr, data);
spin_unlock_irqrestore(&imem->lock, flags);
}
/* Read a 32-bit word from instance memory (PRAMIN) at byte offset @addr.
 *
 * On NV04-class hardware the PRAMIN aperture is mapped at BAR0 offset
 * 0x700000, so the instmem offset is simply added to that base.
 * NOTE(review): @object is assumed to be the instmem subdev itself
 * (cast per nouveau convention) — confirm against registration site.
 */
static u32
nv04_instmem_rd32(struct nvkm_object *object, u64 addr)
{
	struct nvkm_instmem *imem = (void *)object;
	/* Use the device-based MMIO accessor rather than the legacy
	 * object-based nv_rd32(). */
	return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
}
/* Write a 32-bit word to instance memory (PRAMIN) at byte offset @addr.
 *
 * Mirrors nv04_instmem_rd32(): PRAMIN lives at BAR0 offset 0x700000 on
 * NV04-class hardware. The old body returned the (void) result of
 * nv_wr32() from a void function — dropped here along with the legacy
 * object-based accessor.
 */
static void
nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_instmem *imem = (void *)object;
	nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
}
void
* to fit graphics contexts for every channel, the magics come
* from engine/gr/nv40.c
*/
- vs = hweight8((nv_rd32(imem, 0x001540) & 0x0000ff00) >> 8);
+ vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
if (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
else if (device->chipset < 0x43) imem->base.reserved = 0x4f00 * vs;
else if (nv44_gr_class(imem)) imem->base.reserved = 0x4980 * vs;
{
struct nv50_instmem *imem = (void *)nvkm_instmem(object);
struct nv50_instobj *node = (void *)object;
+ struct nvkm_device *device = imem->base.subdev.device;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
spin_lock_irqsave(&imem->lock, flags);
if (unlikely(imem->addr != base)) {
- nv_wr32(imem, 0x001700, base >> 16);
+ nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base;
}
- data = nv_rd32(imem, 0x700000 + addr);
+ data = nvkm_rd32(device, 0x700000 + addr);
spin_unlock_irqrestore(&imem->lock, flags);
return data;
}
{
struct nv50_instmem *imem = (void *)nvkm_instmem(object);
struct nv50_instobj *node = (void *)object;
+ struct nvkm_device *device = imem->base.subdev.device;
unsigned long flags;
u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
spin_lock_irqsave(&imem->lock, flags);
if (unlikely(imem->addr != base)) {
- nv_wr32(imem, 0x001700, base >> 16);
+ nvkm_wr32(device, 0x001700, base >> 16);
imem->addr = base;
}
- nv_wr32(imem, 0x700000 + addr, data);
+ nvkm_wr32(device, 0x700000 + addr, data);
spin_unlock_irqrestore(&imem->lock, flags);
}