	return imem->func->wr32(imem, addr, data);
}

+void
+nvkm_instmem_boot(struct nvkm_instmem *imem)
+{
+	/* Separate bootstrapped objects from the normal list, as we need
+	 * to make sure they're accessed with the slow path on suspend
+	 * and resume.
+	 */
+	struct nvkm_instobj *iobj, *itmp;
+
+	spin_lock(&imem->lock);
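+	/* list_for_each_entry_safe() is required here, as list_move_tail()
+	 * unlinks each entry from imem->list while we iterate.
+	 */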
+	list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
+		list_move_tail(&iobj->head, &imem->boot);
+	}
+	spin_unlock(&imem->lock);
+}
+
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{

@@ ... @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
			if (ret)
				return ret;
		}
+
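+		/* Objects on the boot list are saved after those on the
+		 * normal list, and are accessed via the slow path; see
+		 * nvkm_instmem_boot().
+		 */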
+		list_for_each_entry(iobj, &imem->boot, head) {
+			int ret = nvkm_instobj_save(iobj);
+			if (ret)
+				return ret;
+		}
	}

	if (imem->func->fini)

@@ ... @@ nvkm_instmem_init(struct nvkm_subdev *subdev)
	struct nvkm_instmem *imem = nvkm_instmem(subdev);
	struct nvkm_instobj *iobj;

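+	/* Objects on the boot list are restored before those on the
+	 * normal list; see nvkm_instmem_boot().
+	 */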
+	list_for_each_entry(iobj, &imem->boot, head) {
+		if (iobj->suspend)
+			nvkm_instobj_load(iobj);
+	}
+
	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);

@@ ... @@ nvkm_instmem_ctor
	imem->func = func;
	spin_lock_init(&imem->lock);
	INIT_LIST_HEAD(&imem->list);
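+	/* Bootstrapped objects are moved onto this list by
+	 * nvkm_instmem_boot().
+	 */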
+	INIT_LIST_HEAD(&imem->boot);
}