nvkm_bar_init(struct nvkm_subdev *subdev)
{
        struct nvkm_bar *bar = nvkm_bar(subdev);
-       nvkm_bar_bar2_init(subdev->device);
        bar->func->bar1.init(bar);
        bar->func->bar1.wait(bar);
        if (bar->func->init)
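For orientation, a minimal sketch of what nvkm_bar_init() presumably looks like once the hunk above is applied; the lines after the trailing if are not part of the excerpt, so the init call and the return value are reconstructed as assumptions:

static int
nvkm_bar_init(struct nvkm_subdev *subdev)
{
        struct nvkm_bar *bar = nvkm_bar(subdev);

        /* BAR2 bring-up no longer happens here; see the instmem hunks below. */
        bar->func->bar1.init(bar);
        bar->func->bar1.wait(bar);
        if (bar->func->init)
                bar->func->init(bar);   /* assumed body of the trailing if */
        return 0;
}

The next hunk has lost its enclosing function; judging by the call sites further down, it is the body of the per-object restore helper, nvkm_instobj_load().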

{
        struct nvkm_memory *memory = &iobj->memory;
        const u64 size = nvkm_memory_size(memory);
+       void __iomem *map;
        int i;
-       for (i = 0; i < size; i += 4)
-               nvkm_wo32(memory, i, iobj->suspend[i / 4]);
+       if (!(map = nvkm_kmap(memory))) {
+               for (i = 0; i < size; i += 4)
+                       nvkm_wo32(memory, i, iobj->suspend[i / 4]);
+       } else {
+               memcpy_toio(map, iobj->suspend, size);
+       }
+       nvkm_done(memory);
+
        kvfree(iobj->suspend);
        iobj->suspend = NULL;
}
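Putting the hunk back into that helper gives roughly the following; the signature is an assumption. nvkm_kmap() is tried first to obtain a direct __iomem view of the object, in which case the saved image is bulk-copied back with memcpy_toio(); if no mapping is available, the code falls back to 32-bit writes through nvkm_wo32().

/* Sketch; signature reconstructed from the callers below. */
static void
nvkm_instobj_load(struct nvkm_instobj *iobj)
{
        struct nvkm_memory *memory = &iobj->memory;
        const u64 size = nvkm_memory_size(memory);
        void __iomem *map;
        int i;

        if (!(map = nvkm_kmap(memory))) {
                /* No direct mapping: restore the object word by word. */
                for (i = 0; i < size; i += 4)
                        nvkm_wo32(memory, i, iobj->suspend[i / 4]);
        } else {
                /* Fast path: copy the whole saved image through the mapping. */
                memcpy_toio(map, iobj->suspend, size);
        }
        nvkm_done(memory);

        kvfree(iobj->suspend);
        iobj->suspend = NULL;
}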

                        nvkm_instobj_load(iobj);
        }
+       nvkm_bar_bar2_init(subdev->device);
+
        list_for_each_entry(iobj, &imem->list, head) {
                if (iobj->suspend)
                        nvkm_instobj_load(iobj);
        }
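Taken together, the instmem hunks reorder resume: the objects BAR2 itself depends on are restored first (necessarily via the slow path), BAR2 is then initialised here instead of in nvkm_bar_init(), and every remaining object can take the memcpy_toio() fast path. A sketch of the resulting init routine follows; the routine name, its static int shape, and the name of the first list (boot) are assumptions, since the head of that loop is not part of the excerpt.

static int
nvkm_instmem_init(struct nvkm_subdev *subdev)
{
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
        struct nvkm_instobj *iobj;

        /* Restore the objects BAR2 bring-up depends on (list name assumed). */
        list_for_each_entry(iobj, &imem->boot, head) {
                if (iobj->suspend)
                        nvkm_instobj_load(iobj);
        }

        /* BAR2 is usable from this point on. */
        nvkm_bar_bar2_init(subdev->device);

        /* Everything else can now be restored through the fast path. */
        list_for_each_entry(iobj, &imem->list, head) {
                if (iobj->suspend)
                        nvkm_instobj_load(iobj);
        }

        return 0;
}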

                /* Switch back to NULL accessors when last map is gone. */
-               iobj->base.memory.ptrs = &nv50_instobj_slow;
+               iobj->base.memory.ptrs = NULL;
                mutex_unlock(&subdev->mutex);
        }
}

        *pmemory = &iobj->base.memory;
        nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base);
-       iobj->base.memory.ptrs = &nv50_instobj_slow;
        iobj->imem = imem;
        refcount_set(&iobj->maps, 0);
        INIT_LIST_HEAD(&iobj->lru);
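The two nv50 hunks follow the same pattern: the constructor no longer installs the slow PRAMIN accessors up front, and what is presumably the release path clears memory.ptrs once the last mapping is gone. The accessors are then chosen when the object is acquired, roughly as below; nv50_instobj_fast and the surrounding acquire logic are assumptions, only nv50_instobj_slow appears in the hunks.

        /* Sketch of acquire-time accessor selection (assumed). */
        if (map)
                iobj->base.memory.ptrs = &nv50_instobj_fast;   /* direct BAR2 mapping */
        else
                iobj->base.memory.ptrs = &nv50_instobj_slow;   /* PRAMIN window fallback */
        refcount_set(&iobj->maps, 1);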