/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
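/*
 * Illustrative sketch (not part of the patch; the base and size below
 * are made-up values): a struct resource carries base and extent in one
 * field, so the old (mappable_base, mappable_end) pair collapses into
 * gmadr plus resource_size().
 */
#include <linux/ioport.h>
#include <linux/types.h>

static void example_gmadr_accessors(void)
{
	struct resource gmadr =
		(struct resource) DEFINE_RES_MEM(0xd0000000, 0x10000000);

	phys_addr_t base = gmadr.start;		      /* was mappable_base */
	resource_size_t size = resource_size(&gmadr); /* 0x10000000 bytes */

	(void)base;
	(void)size;
}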
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].base = ggtt->gmadr.start;
ap->ranges[0].size = ggtt->mappable_end;
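/*
 * Note that the aperture base now comes from the resource (gmadr.start),
 * while the size still uses the cached mappable_end, which this patch
 * keeps equal to resource_size(&ggtt->gmadr).
 */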
primary =
page_base += offset & PAGE_MASK;
}
- if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
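/*
 * Both hunks above follow the same pattern: attempt the copy through the
 * write-combined io_mapping first, and on failure fall back to a slower
 * path that can tolerate page faults, as the comment above notes.
 */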
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->mappable);
+ &ggtt->iomap);
if (ret)
goto err_fence;
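/*
 * The PFN passed to remap_io_mapping() is derived from the resource:
 * physical GMADR base (ggtt->gmadr.start) plus the vma's offset within
 * the global GTT (vma->node.start), shifted down to a page frame number.
 */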
offset += page << PAGE_SHIFT;
}
- vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->mappable);
+ io_mapping_fini(&ggtt->iomap);
}
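/*
 * Teardown mirrors setup: arch_phys_wc_del() drops the MTRR that was
 * added over gmadr.start, and io_mapping_fini() releases the iomap
 * created by io_mapping_init_wc().
 */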
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
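/*
 * DEFINE_RES_MEM(start, size) yields a resource with .end == start +
 * size - 1, so resource_size() recovers exactly pci_resource_len() and
 * mappable_end is unchanged by the conversion.
 */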
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
u16 snb_gmch_ctl;
int err;
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
/* 64/512MB is the current min/max we actually know of, but this is just
 * a coarse sanity check.
 */
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
+ phys_addr_t gmadr_base;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
}
intel_gtt_get(&ggtt->base.total,
- &ggtt->mappable_base,
+ &gmadr_base,
&ggtt->mappable_end);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(gmadr_base,
+ ggtt->mappable_end);
+
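/*
 * On old GMCH platforms the base is reported by intel_gtt_get(), so it
 * lands in a local phys_addr_t first and is only then wrapped into the
 * gmadr resource.
 */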
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
ggtt->base.insert_page = i915_ggtt_insert_page;
ggtt->base.insert_entries = i915_ggtt_insert_entries;
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
ggtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
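/*
 * The format becomes %llu because a size is unsigned; the explicit u64
 * cast also keeps the specifier correct independent of mappable_end's
 * exact type.
 */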
if (intel_vtd_active())
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
- dev_priv->ggtt.mappable_base,
+ if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
+ dev_priv->ggtt.gmadr.start,
dev_priv->ggtt.mappable_end)) {
ret = -EIO;
goto out_gtt_cleanup;
}
- ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
/*
 * Initialise stolen early so that we may reserve preallocated
 * objects for the BIOS to KMS transition.
 */
struct i915_ggtt {
struct i915_address_space base;
- struct io_mapping mappable; /* Mapping to our CPU mappable region */
- phys_addr_t mappable_base; /* PA of our GMADR */
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
u64 mappable_end; /* End offset that we can CPU map */
/* Stolen memory is segmented in hardware with different portions
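/*
 * Minimal bring-up sketch (not from the patch; error unwind trimmed,
 * and example_ggtt_iomap_init is a hypothetical helper name): with the
 * fields above, a probe path builds gmadr first, then layers the WC
 * io_mapping and the MTRR on top of it, as the hunks above do.
 */
#include <linux/io.h>
#include <linux/io-mapping.h>
#include <linux/ioport.h>

static int example_ggtt_iomap_init(struct i915_ggtt *ggtt,
				   phys_addr_t base, u64 size)
{
	/* one resource replaces the old mappable_base/mappable_end pair */
	ggtt->gmadr = (struct resource) DEFINE_RES_MEM(base, size);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/* WC mapping over the whole CPU-visible aperture */
	if (!io_mapping_init_wc(&ggtt->iomap, ggtt->gmadr.start,
				ggtt->mappable_end))
		return -EIO;

	/* matching write-combining MTRR */
	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
	return 0;
}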
ggtt->base.insert_page(&ggtt->base, dma, slot,
I915_CACHE_NONE, 0);
- s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
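/*
 * io_mapping_map_atomic_wc() returns a short-lived kmap-style mapping:
 * no sleeping is allowed until io_mapping_unmap_atomic(), which is why
 * each page is compressed inside that window.
 */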
ptr = vma->iomap;
if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
if (ptr == NULL) {
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = ggtt->mappable_base;
+ dev->mode_config.fb_base = ggtt->gmadr.start;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev_priv)->num_pipes,
if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr,
PAGE_SIZE);
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr);
return regs;
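/*
 * Overlay registers live either in a physical object
 * (OVERLAY_NEEDS_PHYSICAL) or behind GMADR; the second mapper uses the
 * atomic variant for callers that cannot sleep.
 */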
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
val = ioread32(vaddr + n);
io_mapping_unmap_atomic(vaddr);
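/*
 * Selftest pattern: insert_page() points a single GGTT slot at the
 * object's first page, then iowrite32()/ioread32() through the iomap
 * verify that what one hunk wrote is read back in the other.
 */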
ggtt->base.i915 = i915;
- ggtt->mappable_base = 0;
- ggtt->mappable_end = 2048 * PAGE_SIZE;
+ ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->base.total = 4096 * PAGE_SIZE;
ggtt->base.clear_range = nop_clear_range;
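/*
 * The mock keeps the same geometry as before: a zero-based GMADR of
 * 2048 pages, so resource_size() still yields 2048 * PAGE_SIZE for
 * mappable_end.
 */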