drm/i915: make mappable struct resource centric
authorMatthew Auld <matthew.auld@intel.com>
Mon, 11 Dec 2017 15:18:20 +0000 (15:18 +0000)
committerJoonas Lahtinen <joonas.lahtinen@linux.intel.com>
Tue, 12 Dec 2017 10:30:21 +0000 (12:30 +0200)
Now that we are using struct resource to track the stolen region, it is
more convenient to track the mappable region in a resource as well.

v2: prefer iomap and gmadr naming scheme
    prefer DEFINE_RES_MEM

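For illustration only (not part of the patch), a minimal sketch of the
pattern adopted here: the GMADR aperture is described by a struct
resource built with DEFINE_RES_MEM, and the CPU-mappable size is derived
with resource_size() instead of carrying a separate phys_addr_t base
plus length. The demo_* names below are hypothetical, not i915 code.

    /*
     * Simplified sketch of the resource-centric GMADR bookkeeping.
     * demo_ggtt/demo_probe_gmadr are illustrative names only.
     */
    #include <linux/ioport.h>
    #include <linux/pci.h>

    struct demo_ggtt {
            struct resource gmadr;  /* GMADR aperture as a resource */
            u64 mappable_end;       /* CPU-mappable size, for callers */
    };

    static void demo_probe_gmadr(struct demo_ggtt *ggtt, struct pci_dev *pdev)
    {
            /* BAR 2 is the GMADR aperture; capture it as a resource. */
            ggtt->gmadr =
                    (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, 2),
                                                    pci_resource_len(pdev, 2));

            /* Size falls out of the resource rather than a stored length. */
            ggtt->mappable_end = resource_size(&ggtt->gmadr);
    }
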
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171211151822.20953-8-matthew.auld@intel.com
12 files changed:
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/mock_gtt.c

index 77df9bad5dea1d6a8dc76293e1ddd12a947b8cc8..103910a24e4b275dccc1b4a0f0a425a90e4a7b84 100644 (file)
@@ -348,7 +348,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 
 /* Aperture/GM space definitions for GVT device */
 #define gvt_aperture_sz(gvt)     (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
 
 #define gvt_ggtt_gm_sz(gvt)      (gvt->dev_priv->ggtt.base.total)
 #define gvt_ggtt_sz(gvt) \
index 5b1fd5f1defb6999ab272ef7cd5d6472b26243b9..54a8fca7e7b2847bea39613a1bce31ada35672fe 100644 (file)
@@ -726,7 +726,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
        if (!ap)
                return -ENOMEM;
 
-       ap->ranges[0].base = ggtt->mappable_base;
+       ap->ranges[0].base = ggtt->gmadr.start;
        ap->ranges[0].size = ggtt->mappable_end;
 
        primary =
index fcc9b53864f0ad4646774b24f34be27f07c0a5ba..e89496aec85703e9f947a4c80c5873fc16e3f65e 100644 (file)
@@ -1116,7 +1116,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                        page_base += offset & PAGE_MASK;
                }
 
-               if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+               if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
@@ -1324,7 +1324,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
-               if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+               if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
@@ -1967,9 +1967,9 @@ int i915_gem_fault(struct vm_fault *vmf)
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                              (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
-                              &ggtt->mappable);
+                              &ggtt->iomap);
        if (ret)
                goto err_fence;
 
index 70ccd63cbf8ee101caa6fdfc43b9e3bc49c2b5f5..4401068ff468ad36aef1d5df5277e2f008f01bb3 100644 (file)
@@ -1012,7 +1012,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                offset += page << PAGE_SHIFT;
        }
 
-       vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+       vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
                                                         offset);
        cache->page = page;
        cache->vaddr = (unsigned long)vaddr;
index 7a5302318d315e53ecfac0fbf6b58708bb887c54..fbc1f467273ae2f7aa2300ae16e392ec77e3eb6d 100644 (file)
@@ -2912,7 +2912,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        arch_phys_wc_del(ggtt->mtrr);
-       io_mapping_fini(&ggtt->mappable);
+       io_mapping_fini(&ggtt->iomap);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3288,8 +3288,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        int err;
 
        /* TODO: We're not aware of mappable constraints on gen8 yet */
-       ggtt->mappable_base = pci_resource_start(pdev, 2);
-       ggtt->mappable_end = pci_resource_len(pdev, 2);
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+                                                pci_resource_len(pdev, 2));
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
        if (!err)
@@ -3343,8 +3345,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        u16 snb_gmch_ctl;
        int err;
 
-       ggtt->mappable_base = pci_resource_start(pdev, 2);
-       ggtt->mappable_end = pci_resource_len(pdev, 2);
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+                                                pci_resource_len(pdev, 2));
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
 
        /* 64/512MB is the current min/max we actually know of, but this is just
         * a coarse sanity check.
@@ -3397,6 +3401,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
        struct drm_i915_private *dev_priv = ggtt->base.i915;
+       phys_addr_t gmadr_base;
        int ret;
 
        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3406,9 +3411,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        }
 
        intel_gtt_get(&ggtt->base.total,
-                     &ggtt->mappable_base,
+                     &gmadr_base,
                      &ggtt->mappable_end);
 
+       ggtt->gmadr =
+               (struct resource) DEFINE_RES_MEM(gmadr_base,
+                                                ggtt->mappable_end);
+
        ggtt->do_idle_maps = needs_idle_maps(dev_priv);
        ggtt->base.insert_page = i915_ggtt_insert_page;
        ggtt->base.insert_entries = i915_ggtt_insert_entries;
@@ -3476,7 +3485,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %lluM\n",
                 ggtt->base.total >> 20);
-       DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+       DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %lluM\n",
                         (u64)resource_size(&intel_graphics_stolen_res) >> 20);
        if (intel_vtd_active())
@@ -3507,14 +3516,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
                ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
-                               dev_priv->ggtt.mappable_base,
+       if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
+                               dev_priv->ggtt.gmadr.start,
                                dev_priv->ggtt.mappable_end)) {
                ret = -EIO;
                goto out_gtt_cleanup;
        }
 
-       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+       ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
 
        /*
         * Initialise stolen early so that we may reserve preallocated
index db20c72ecfc8aeff7d8146015f4da3bddf8da3bb..4a17ce36281ae894901b30cca04b430c9715b2ef 100644 (file)
@@ -368,9 +368,9 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
  */
 struct i915_ggtt {
        struct i915_address_space base;
-       struct io_mapping mappable;     /* Mapping to our CPU mappable region */
 
-       phys_addr_t mappable_base;      /* PA of our GMADR */
+       struct io_mapping iomap;        /* Mapping to our CPU mappable region */
+       struct resource gmadr;          /* GMADR resource */
        u64 mappable_end;               /* End offset that we can CPU map */
 
        /* Stolen memory is segmented in hardware with different portions
index 48418fb810660472e78ae8d130b45f84d2343e2d..aba50aa613f1f93780d6a4851bf87dca3506f73c 100644 (file)
@@ -956,7 +956,7 @@ i915_error_object_create(struct drm_i915_private *i915,
                ggtt->base.insert_page(&ggtt->base, dma, slot,
                                       I915_CACHE_NONE, 0);
 
-               s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+               s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void  __force *)s, dst);
                io_mapping_unmap_atomic(s);
 
index 92c11e70fea48c4be65aabc1d0ef69ab7469e666..e0e7c48f45dc3ef10f5c558bd7cb4a655acfe9c0 100644 (file)
@@ -311,7 +311,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
        ptr = vma->iomap;
        if (ptr == NULL) {
-               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
index f0a8686f051360ca324d01ea40b6509ee5df9d38..f9ff1c7fa0542691e8658167b880e6c5c9fe3a38 100644 (file)
@@ -14595,7 +14595,7 @@ int intel_modeset_init(struct drm_device *dev)
                dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
        }
 
-       dev->mode_config.fb_base = ggtt->mappable_base;
+       dev->mode_config.fb_base = ggtt->gmadr.start;
 
        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      INTEL_INFO(dev_priv)->num_pipes,
index 1b397b41cb4fc17efad5aeeafab1d9e8c9cd05e9..41e9465d44a8128c0463d024be40066212819d79 100644 (file)
@@ -219,7 +219,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
        if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
+               regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
                                         overlay->flip_addr,
                                         PAGE_SIZE);
 
@@ -1508,7 +1508,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
+               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
                                                overlay->flip_addr);
 
        return regs;
index 6491cf0a4f4640e88baf54e8c28b1aa1cdfd4aa8..4a28d713a7d8236fa2095b39977c1e727a859ad6 100644 (file)
@@ -1074,7 +1074,7 @@ static int igt_ggtt_page(void *arg)
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);
 
-               vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+               vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                iowrite32(n, vaddr + n);
                io_mapping_unmap_atomic(vaddr);
 
@@ -1092,7 +1092,7 @@ static int igt_ggtt_page(void *arg)
                                       i915_gem_object_get_dma_address(obj, 0),
                                       offset, I915_CACHE_NONE, 0);
 
-               vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+               vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
                val = ioread32(vaddr + n);
                io_mapping_unmap_atomic(vaddr);
 
index 336e1afb250f68147da22be96e8d9b87a3e76595..e96873f96116ecddabfac25b1804b39be9daf2b4 100644 (file)
@@ -110,8 +110,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 
        ggtt->base.i915 = i915;
 
-       ggtt->mappable_base = 0;
-       ggtt->mappable_end = 2048 * PAGE_SIZE;
+       ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+       ggtt->mappable_end = resource_size(&ggtt->gmadr);
        ggtt->base.total = 4096 * PAGE_SIZE;
 
        ggtt->base.clear_range = nop_clear_range;