void i915_gem_release(struct drm_device *dev, struct drm_file *file);
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode);
+ int tiling_mode, unsigned int stride);
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode, bool fenced);
+ int tiling_mode, unsigned int stride,
+ bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
* @dev_priv: i915 device
* @size: object size
* @tiling_mode: tiling mode
+ * @stride: tiling stride in bytes
*
* Return the required global GTT size for an object, taking into account
* potential fence register mapping.
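+ * With tiling enabled this is the object size rounded up to the next
+ * tile row on gen4+, or to a power-of-two fence region on gen2/3.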
*/
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
- u64 size, int tiling_mode)
+ u64 size, int tiling_mode, unsigned int stride)
{
u64 ggtt_size;
- GEM_BUG_ON(size == 0);
+ GEM_BUG_ON(!size);
- if (INTEL_GEN(dev_priv) >= 4 ||
- tiling_mode == I915_TILING_NONE)
+ if (tiling_mode == I915_TILING_NONE)
return size;
+ GEM_BUG_ON(!stride);
+
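+ /*
+  * From gen4 onwards the fence registers track start, end and stride,
+  * so the fenced region only has to cover a whole number of tile rows:
+  * the byte stride multiplied by the tile height (8 rows for X-major
+  * tiles, 32 for Y-major).
+  */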
+ if (INTEL_GEN(dev_priv) >= 4) {
+ stride *= i915_gem_tile_height(tiling_mode);
+ GEM_BUG_ON(stride & 4095);
+ return roundup(size, stride);
+ }
+
/* Previous chips need a power-of-two fence region when tiling */
if (IS_GEN3(dev_priv))
ggtt_size = 1024*1024;
* @dev_priv: i915 device
* @size: object size
* @tiling_mode: tiling mode
+ * @stride: tiling stride in bytes
* @fenced: is fenced alignment required or not
*
* Return the required global GTT alignment for an object, taking into account
* potential fence register mapping.
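+ * On pre-gen4 hardware this is the power-of-two size of the fence
+ * region itself; otherwise the minimum 4k GTT page alignment suffices.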
*/
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode, bool fenced)
+ int tiling_mode, unsigned int stride,
+ bool fenced)
{
- GEM_BUG_ON(size == 0);
+ GEM_BUG_ON(!size);
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
+ return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode, stride);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
u32 fence_size;
fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
- i915_gem_object_get_tiling(obj));
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
/* If the required space is larger than the available
* aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
return true;
}
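+/*
+ * Check whether the vma's existing GTT node can still hold a fence for
+ * the new tiling mode and stride, or whether it has to be rebound.
+ */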
-static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma,
+ int tiling_mode, unsigned int stride)
{
struct drm_i915_private *dev_priv = vma->vm->i915;
u32 size;
return false;
}
- size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+ size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode, stride);
if (vma->node.size < size)
return false;
/* Make the current GTT allocation valid for the change in tiling. */
static int
-i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
+ int tiling_mode, unsigned int stride)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
if (tiling_mode == I915_TILING_NONE)
return 0;
- if (INTEL_GEN(dev_priv) >= 4)
- return 0;
-
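+ /*
+  * gen4+ fences are now rounded to whole tile rows as well, so every
+  * bound vma must be rechecked against the new tiling parameters.
+  */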
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (i915_vma_fence_prepare(vma, tiling_mode))
+ if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
ret = i915_vma_unbind(vma);
* whilst executing a fenced command for an untiled object.
*/
- err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
+ err = i915_gem_object_fence_prepare(obj,
+ args->tiling_mode,
+ args->stride);
if (!err) {
struct i915_vma *vma;
fence_size = i915_gem_get_ggtt_size(dev_priv,
vma->size,
- i915_gem_object_get_tiling(obj));
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
vma->size,
i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj),
true);
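+ /* fence_alignment is used as a mask below, so it must be a power of two */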
+ GEM_BUG_ON(!is_power_of_2(fence_alignment));
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
size = max(size, vma->size);
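+ /*
+  * A mappable vma may be fenced through the aperture, so reserve the
+  * full fence size and alignment for it.
+  */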
if (flags & PIN_MAPPABLE)
size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
alignment = max(max(alignment, vma->display_alignment),
i915_gem_get_ggtt_alignment(dev_priv, size,
i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj),
flags & PIN_MAPPABLE));
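+ /* max() of alignments is only itself valid when both are powers of two */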
+ GEM_BUG_ON(!is_power_of_2(alignment));
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;