struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
+ if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}
continue;
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
- vma->is_ggtt ? "g" : "pp",
+ i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size);
- if (vma->is_ggtt)
+ if (i915_vma_is_ggtt(vma))
seq_printf(m, ", type: %u", vma->ggtt_view.type);
seq_puts(m, ")");
}
if (!drm_mm_node_allocated(&vma->node))
continue;
- if (vma->is_ggtt) {
+ if (i915_vma_is_ggtt(vma)) {
stats->global += vma->node.size;
} else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
GEM_BUG_ON(obj->bind_count == 0);
GEM_BUG_ON(!obj->pages);
- if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+ if (i915_vma_is_ggtt(vma) &&
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
/* release the fence reg _after_ flushing */
trace_i915_vma_unbind(vma);
vma->vm->unbind_vma(vma);
}
- vma->bound = 0;
+ vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- if (vma->is_ggtt) {
+ if (i915_vma_is_ggtt(vma)) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) {
i915_gem_object_unpin_pages(obj);
destroy:
- if (unlikely(vma->closed))
+ if (unlikely(i915_vma_is_closed(vma)))
i915_vma_destroy(vma);
return 0;
u64 min_alignment;
int ret;
- GEM_BUG_ON(vma->bound);
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
int
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- unsigned int bound = vma->bound;
+ unsigned int bound;
int ret;
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !vma->is_ggtt);
+ GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
- if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+ bound = vma->flags;
+ if (WARN_ON((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK))
return -EBUSY;
/* Pin early to prevent the shrinker/eviction logic from destroying
 * the vma as we insert and bind.
 */
__i915_vma_pin(vma);
- if (!bound) {
+ if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
ret = i915_vma_insert(vma, size, alignment, flags);
if (ret)
goto err;
if (ret)
goto err;
- if ((bound ^ vma->bound) & GLOBAL_BIND)
+ if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
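The `(bound ^ vma->flags) & I915_VMA_GLOBAL_BIND` test above relies on xor flagging exactly the bits that changed between the pre-bind snapshot and the current flags, so the map-and-fenceable state is only recomputed when a global binding actually appeared. A minimal standalone sketch of the idiom (not part of the patch):

#include <assert.h>

#define BIT(n) (1u << (n))
#define GLOBAL_BIND BIT(5)

int main(void)
{
	unsigned int before = 0;		/* snapshot taken before binding */
	unsigned int after  = GLOBAL_BIND;	/* bind set the bit */

	/* Non-zero iff the bit differs between the two snapshots. */
	assert((before ^ after) & GLOBAL_BIND);

	/* No transition: bound both before and after, nothing to redo. */
	before = after = GLOBAL_BIND;
	assert(!((before ^ after) & GLOBAL_BIND));
	return 0;
}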
* unbound now.
*/
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
- GEM_BUG_ON(!vma->is_ggtt);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(i915_vma_is_active(vma));
- vma->pin_count = 0;
+ vma->flags &= ~I915_VMA_PIN_MASK;
i915_vma_close(vma);
}
GEM_BUG_ON(obj->bind_count);
GEM_BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (i915_vma_is_ggtt(vma) &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
}
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (vma->is_ggtt &&
+ if (i915_vma_is_ggtt(vma) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (i915_vma_is_ggtt(vma) &&
+ i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (vma->is_ggtt &&
+ if (i915_vma_is_ggtt(vma) &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->is_ggtt &&
+ if (i915_vma_is_ggtt(vma) &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
GEM_BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (vma->is_ggtt &&
+ if (i915_vma_is_ggtt(vma) &&
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
return vma->node.size;
}
struct i915_vma *vma, *vn;
list_for_each_entry_safe(vma, vn, *phase, vm_link)
- if (!vma->closed)
+ if (!i915_vma_is_closed(vma))
i915_vma_close(vma);
}
}
if (entry->relocation_count == 0)
return false;
- if (!vma->is_ggtt)
+ if (!i915_vma_is_ggtt(vma))
return false;
/* See also use_cpu_reloc() */
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
- WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
+ WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+ !i915_vma_is_ggtt(vma));
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
- vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+ vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
return 0;
}
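Why setting both bits avoids double-binding: i915_vma_bind() (reworked later in this patch) masks already-set bind flags out of the requested set, so once both bits are up, a later bind request for either address space filters down to nothing. A self-contained sketch of that filtering, assuming only the two bind bits:

#include <assert.h>

#define BIT(n) (1u << (n))
#define GLOBAL_BIND BIT(5)
#define LOCAL_BIND  BIT(6)

/* Mirrors the filtering in i915_vma_bind(): drop already-bound flags. */
static unsigned int wanted_bind(unsigned int flags, unsigned int requested)
{
	return requested & ~(flags & (GLOBAL_BIND | LOCAL_BIND));
}

int main(void)
{
	unsigned int flags = 0;

	/* Aliasing-ppgtt bind upgrades to both, as in the hunk above. */
	flags |= GLOBAL_BIND | LOCAL_BIND;

	/* Either follow-up request now filters down to a no-op. */
	assert(wanted_bind(flags, GLOBAL_BIND) == 0);
	assert(wanted_bind(flags, LOCAL_BIND) == 0);
	return 0;
}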
pte_flags |= PTE_READ_ONLY;
- if (flags & GLOBAL_BIND) {
+ if (flags & I915_VMA_GLOBAL_BIND) {
vma->vm->insert_entries(vma->vm,
vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
}
- if (flags & LOCAL_BIND) {
+ if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt =
to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
- if (vma->bound & GLOBAL_BIND)
+ if (vma->flags & I915_VMA_GLOBAL_BIND)
vma->vm->clear_range(vma->vm,
vma->node.start, size,
true);
- if (vma->bound & LOCAL_BIND && appgtt)
+ if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
vma->node.start, size,
true);
return;
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- if (unlikely(vma->closed && !i915_vma_is_pinned(vma)))
+ if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
}
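Together with i915_vma_close() below, this retire hook implements deferred destruction: closing merely marks the vma, and whichever path drops the last reason to keep it alive performs the actual teardown. A compressed, self-contained model of the pattern, folding the active tracking into the pin count (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_vma {
	unsigned int pins;
	bool closed;
};

static void toy_destroy(struct toy_vma *vma)
{
	printf("destroyed\n");
	free(vma);
}

/* Mark closed; defer teardown while someone still holds a pin. */
static void toy_close(struct toy_vma *vma)
{
	vma->closed = true;
	if (vma->pins == 0)
		toy_destroy(vma);
}

/* The last unpin of a closed vma performs the deferred teardown. */
static void toy_unpin(struct toy_vma *vma)
{
	if (--vma->pins == 0 && vma->closed)
		toy_destroy(vma);
}

int main(void)
{
	struct toy_vma *vma = calloc(1, sizeof(*vma));

	vma->pins = 1;	/* e.g. pinned for execbuf */
	toy_close(vma);	/* no output: destruction deferred */
	toy_unpin(vma);	/* prints "destroyed" */
	return 0;
}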
{
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(i915_vma_is_active(vma));
- GEM_BUG_ON(!vma->closed);
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
list_del(&vma->vm_link);
- if (!vma->is_ggtt)
+ if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
void i915_vma_close(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->closed);
- vma->closed = true;
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
list_del_init(&vma->obj_link);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
vma->vm = vm;
vma->obj = obj;
vma->size = obj->base.size;
- vma->is_ggtt = i915_is_ggtt(vm);
if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
vma->size = view->params.partial.size;
if (!vma)
vma = __i915_gem_vma_create(obj, &ggtt->base, view);
- GEM_BUG_ON(vma->closed);
+ GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
}
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
- int ret;
u32 bind_flags;
+ u32 vma_flags;
+ int ret;
if (WARN_ON(flags == 0))
return -EINVAL;
bind_flags = 0;
if (flags & PIN_GLOBAL)
- bind_flags |= GLOBAL_BIND;
+ bind_flags |= I915_VMA_GLOBAL_BIND;
if (flags & PIN_USER)
- bind_flags |= LOCAL_BIND;
+ bind_flags |= I915_VMA_LOCAL_BIND;
+ vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
if (flags & PIN_UPDATE)
- bind_flags |= vma->bound;
+ bind_flags |= vma_flags;
else
- bind_flags &= ~vma->bound;
-
+ bind_flags &= ~vma_flags;
if (bind_flags == 0)
return 0;
- if (vma->bound == 0 && vma->vm->allocate_va_range) {
+ if (vma_flags == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
if (ret)
return ret;
- vma->bound |= bind_flags;
-
+ vma->flags |= bind_flags;
return 0;
}
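The bind_flags derivation above is easy to misread, so here it is restated in isolation: PIN_UPDATE forces a rewrite of whatever is currently bound, while the normal path binds only what is not yet bound. A standalone sketch with stand-in PIN_* values (the driver defines the real ones elsewhere):

#include <assert.h>

#define BIT(n) (1u << (n))
#define I915_VMA_GLOBAL_BIND BIT(5)
#define I915_VMA_LOCAL_BIND  BIT(6)

/* Stand-in values for illustration only. */
#define PIN_GLOBAL BIT(0)
#define PIN_USER   BIT(1)
#define PIN_UPDATE BIT(2)

static unsigned int compute_bind_flags(unsigned int vma_flags,
				       unsigned int flags)
{
	unsigned int bind_flags = 0;

	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;	/* rebind what is already bound */
	else
		bind_flags &= ~vma_flags;	/* skip what is already bound */

	return bind_flags;
}

int main(void)
{
	/* Already globally bound: a plain PIN_GLOBAL is a no-op... */
	assert(compute_bind_flags(I915_VMA_GLOBAL_BIND, PIN_GLOBAL) == 0);
	/* ...but PIN_UPDATE rewrites the existing binding. */
	assert(compute_bind_flags(I915_VMA_GLOBAL_BIND,
				  PIN_GLOBAL | PIN_UPDATE) ==
	       I915_VMA_GLOBAL_BIND);
	return 0;
}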
if (WARN_ON(!vma->obj->map_and_fenceable))
return IO_ERR_PTR(-ENODEV);
- GEM_BUG_ON(!vma->is_ggtt);
- GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
ptr = vma->iomap;
if (ptr == NULL) {
void __iomem *iomap;
u64 size;
- unsigned int active;
- struct i915_gem_active last_read[I915_NUM_ENGINES];
+ unsigned int flags;
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits.
+ */
+#define I915_VMA_PIN_MASK 0xf
/** Flags and address space this VMA is bound to */
-#define GLOBAL_BIND (1<<0)
-#define LOCAL_BIND (1<<1)
- unsigned int bound : 4;
- bool is_ggtt : 1;
- bool closed : 1;
+#define I915_VMA_GLOBAL_BIND BIT(5)
+#define I915_VMA_LOCAL_BIND BIT(6)
+
+#define I915_VMA_GGTT BIT(7)
+#define I915_VMA_CLOSED BIT(8)
+
+ unsigned int active;
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
/**
* Support different GGTT views into the same object.
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
-
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits. */
- unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};
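The packed layout only works because the 4-bit pin count (bits 0-3) and the status bits (BIT(5) and up) occupy disjoint ranges, leaving bit 4 free to absorb a pin-count overflow until the GEM_BUG_ON catches it. A compile-time check along these lines (not in the patch; the in-kernel idiom would be BUILD_BUG_ON) makes the layout explicit:

#include <assert.h>

#define BIT(n) (1u << (n))
#define I915_VMA_PIN_MASK 0xf
#define I915_VMA_GLOBAL_BIND BIT(5)
#define I915_VMA_LOCAL_BIND  BIT(6)
#define I915_VMA_GGTT   BIT(7)
#define I915_VMA_CLOSED BIT(8)

/* The pin count (bits 0-3) must not collide with any status bit. */
static_assert((I915_VMA_PIN_MASK &
	       (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND |
		I915_VMA_GGTT | I915_VMA_CLOSED)) == 0,
	      "pin count overlaps status bits");

int main(void) { return 0; }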
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CLOSED;
+}
+
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
return vma->active;
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
- return vma->pin_count;
+ return vma->flags & I915_VMA_PIN_MASK;
}
static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
static inline void __i915_vma_pin(struct i915_vma *vma)
{
- vma->pin_count++;
+ vma->flags++;
GEM_BUG_ON(!i915_vma_is_pinned(vma));
}
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_pinned(vma));
- vma->pin_count--;
+ vma->flags--;
}
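The bare vma->flags++/-- looks like a bug but is deliberate: the pin count sits in the lowest bits of flags, so a plain increment or decrement touches only the count and leaves the status bits alone, with the GEM_BUG_ON tripping if the count escapes I915_VMA_PIN_MASK. A short demonstration (not part of the patch):

#include <assert.h>

#define BIT(n) (1u << (n))
#define I915_VMA_PIN_MASK 0xf
#define I915_VMA_GGTT BIT(7)

int main(void)
{
	unsigned int flags = I915_VMA_GGTT;	/* status bit set, pin count 0 */

	flags++;	/* pin: only the low bits change */
	assert((flags & I915_VMA_PIN_MASK) == 1);
	assert(flags & I915_VMA_GGTT);		/* status bits untouched */

	flags--;	/* unpin */
	assert((flags & I915_VMA_PIN_MASK) == 0);
	assert(flags & I915_VMA_GGTT);
	return 0;
}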
static inline void i915_vma_unpin(struct i915_vma *vma)
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->pin_count)
+ if (i915_vma_is_pinned(vma))
return true;
return false;
goto err;
}
- vma->bound |= GLOBAL_BIND;
+ vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
obj->bind_count++;
if (i915_is_ggtt(vm))
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- vma && (vma->bound & GLOBAL_BIND) &&
+ vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
- if (!(vma && vma->bound & GLOBAL_BIND))
+ if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);