drm/nouveau/imem: separate pre-BAR2-bootstrap objects from the rest
author    Ben Skeggs <bskeggs@redhat.com>  Tue, 31 Oct 2017 17:56:19 +0000 (03:56 +1000)
committer Ben Skeggs <bskeggs@redhat.com>  Thu, 2 Nov 2017 03:32:20 +0000 (13:32 +1000)
These objects will require slow-path access during suspend/resume.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
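
The two base.c hunks below set up an ordering that is easy to miss, so here it is condensed. This is a paraphrase of the patch, not extra driver code: the helper names sketch_fini()/sketch_init() are mine, and the reasoning in the comment is my reading of the change rather than text from the commit.

/* Condensed from the two base.c hunks below -- a paraphrase, not a
 * literal copy.  Suspend evicts the normal list first and the boot
 * list last; resume restores the boot list first.  The boot list
 * holds the objects that existed before BAR2 was bootstrapped
 * (notably the page tables BAR2 itself depends on), so they must be
 * the last thing torn down and the first thing -- via the slow
 * path -- brought back.
 */
static int
sketch_fini(struct nvkm_instmem *imem)
{
	struct nvkm_instobj *iobj;

	list_for_each_entry(iobj, &imem->list, head) {
		int ret = nvkm_instobj_save(iobj);
		if (ret)
			return ret;
	}

	list_for_each_entry(iobj, &imem->boot, head) {
		int ret = nvkm_instobj_save(iobj);
		if (ret)
			return ret;
	}
	return 0;
}

static void
sketch_init(struct nvkm_instmem *imem)
{
	struct nvkm_instobj *iobj;

	list_for_each_entry(iobj, &imem->boot, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}
}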
drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 40f845e312723744c52330bdc29b24666281b9f0..8111c0c3c5ecd9c2bc2f1593228a6be89641eafe 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -9,6 +9,7 @@ struct nvkm_instmem {
 
        spinlock_t lock;
        struct list_head list;
+       struct list_head boot;
        u32 reserved;
 
        struct nvkm_memory *vbios;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 36b3424149b3ddd0467566f2313b08575c798781..8fc63ec20d6ef8733d976d32cfc48b8b85906d24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -129,6 +129,21 @@ nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
        return imem->func->wr32(imem, addr, data);
 }
 
+void
+nvkm_instmem_boot(struct nvkm_instmem *imem)
+{
+       /* Separate bootstrapped objects from normal list, as we need
+        * to make sure they're accessed with the slowpath on suspend
+        * and resume.
+        */
+       struct nvkm_instobj *iobj, *itmp;
+       spin_lock(&imem->lock);
+       list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
+               list_move_tail(&iobj->head, &imem->boot);
+       }
+       spin_unlock(&imem->lock);
+}
+
 static int
 nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
 {
@@ -141,6 +156,12 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
                        if (ret)
                                return ret;
                }
+
+               list_for_each_entry(iobj, &imem->boot, head) {
+                       int ret = nvkm_instobj_save(iobj);
+                       if (ret)
+                               return ret;
+               }
        }
 
        if (imem->func->fini)
@@ -155,6 +176,11 @@ nvkm_instmem_init(struct nvkm_subdev *subdev)
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
        struct nvkm_instobj *iobj;
 
+       list_for_each_entry(iobj, &imem->boot, head) {
+               if (iobj->suspend)
+                       nvkm_instobj_load(iobj);
+       }
+
        list_for_each_entry(iobj, &imem->list, head) {
                if (iobj->suspend)
                        nvkm_instobj_load(iobj);
@@ -198,4 +224,5 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
        imem->func = func;
        spin_lock_init(&imem->lock);
        INIT_LIST_HEAD(&imem->list);
+       INIT_LIST_HEAD(&imem->boot);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 374df1ebe2e858b42ae3d9709f5c8679b54fc0ec..be5670f9fefaf7025231487eafac6e0cdd8103de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -271,6 +271,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
        }
 
        nv50_instobj_kmap(iobj, vmm);
+       nvkm_instmem_boot(imem);
        mutex_unlock(&imem->subdev.mutex);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index e7515d96b31ff2022d2b8e3de9c7e813e4bded4b..44651ca42d5297761557c99a813589ada6580814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -16,6 +16,7 @@ struct nvkm_instmem_func {
 
 void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
                       int index, struct nvkm_instmem *);
+void nvkm_instmem_boot(struct nvkm_instmem *);
 
 #include <core/memory.h>
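
Aside: the drain performed by nvkm_instmem_boot() is the stock list_move_tail() pattern, walked with the _safe iterator because the walk itself unlinks entries. Below is a self-contained userspace sketch of just that idiom; the minimal list helpers stand in for <linux/list.h>, locking is omitted (the kernel version holds imem->lock), and every name here is illustrative rather than nouveau code.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for the <linux/list.h> helpers the patch uses. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Unlink from one list, append to the tail of another. */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	list_del(entry);
	list_add_tail(entry, head);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct obj {
	const char *name;
	struct list_head head;
};

int main(void)
{
	struct list_head list = LIST_HEAD_INIT(list);
	struct list_head boot = LIST_HEAD_INIT(boot);
	struct obj objs[] = {
		{ .name = "pgd" }, { .name = "pgt" }, { .name = "gpuobj" },
	};
	struct list_head *p, *n;
	size_t i;

	for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++)
		list_add_tail(&objs[i].head, &list);

	/* The nvkm_instmem_boot() step: drain "list" into "boot".
	 * "n" caches the successor, as list_for_each_entry_safe()
	 * does, because list_move_tail() unlinks "p" mid-walk. */
	for (p = list.next; p != &list; p = n) {
		n = p->next;
		list_move_tail(p, &boot);
	}

	for (p = boot.next; p != &boot; p = p->next)
		printf("boot: %s\n", container_of(p, struct obj, head)->name);
	return 0;
}

Compiled with plain cc, this prints the three objects in their original order, now on the boot list -- the same property the patch relies on when it replays the boot list in order on resume.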