static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+ return obj->userfault_count ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
if (ret)
goto err_unpin;
- /* Mark as being mmapped into userspace for later revocation */
- assert_rpm_wakelock_held(dev_priv);
- if (list_empty(&obj->userfault_link))
- list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
+ if (ret)
+ goto err_fence;
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
+ if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+ GEM_BUG_ON(!obj->userfault_count);
+
+err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
return ret;
}
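The reordering above defers all userfault bookkeeping until remap_io_mapping() has succeeded, so a failed remap can no longer leave an object on mm.userfault_list with no live PTEs behind it. A minimal sketch of the invariant this maintains, written as a hypothetical debug helper (assert_userfault_accounting is an invented name, not part of the patch):

static void assert_userfault_accounting(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	unsigned int count = 0;

	/* Count the GGTT vmas currently backing userspace PTEs. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			break;	/* GGTT vmas sit at the head of vma_list */
		if (i915_vma_has_userfault(vma))
			count++;
	}

	/* The object-level counter must mirror the per-vma bits. */
	GEM_BUG_ON(count != obj->userfault_count);
}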
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!obj->userfault_count);
+
+ obj->userfault_count = 0;
+ list_del(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ i915_vma_unset_userfault(vma);
+ }
+}
+
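__i915_gem_object_release_mmap() relies on the i915 invariant that GGTT vmas are kept at the head of obj->vma_list, which is why its loop may break at the first non-GGTT vma. A note on its internal ordering, as I read it (not spelled out in the patch):

/*
 * The counter and list linkage are torn down before drm_vma_node_unmap()
 * shoots down the PTEs; struct_mutex excludes new faults for the whole
 * sequence, so a concurrent fault can never observe half-cleared state.
 */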
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
lockdep_assert_held(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (list_empty(&obj->userfault_link))
+ if (!obj->userfault_count)
goto out;
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
+ __i915_gem_object_release_mmap(obj);
/* Ensure that the CPU's PTEs are revoked and there are no outstanding
* memory transactions from userspace before we return. The TLB
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link) {
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
- }
+ &dev_priv->mm.userfault_list, userfault_link)
+ __i915_gem_object_release_mmap(obj);
/* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
if (!reg->vma)
continue;
- GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
reg->dirty = true;
}
}
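An illustrative restatement of what the tightened assert guards (my wording, not the patch's):

/*
 * After i915_gem_runtime_suspend() has drained mm.userfault_list, no
 * fenced vma may still back userspace PTEs: a CPU access through a
 * fence register that the hardware is about to lose would be a bug.
 * The per-vma check expresses exactly that, where the old object-level
 * list_empty() test could only say "no mmap on the object at all".
 */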
mutex_init(&obj->mm.lock);
INIT_LIST_HEAD(&obj->global_link);
- INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
llist_for_each_entry_safe(obj, on, freed, freed) {
GEM_BUG_ON(obj->bind_count);
+ GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
if (i915_vma_is_pinned(vma))
return false;
- if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+ if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
return false;
list_add(&vma->evict_link, unwind);
/* Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
- i915_gem_release_mmap(fence->vma->obj);
+ GEM_BUG_ON(fence->vma->fence != fence);
+ i915_vma_revoke_mmap(fence->vma);
fence->vma->fence = NULL;
fence->vma = NULL;
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->vma)
- i915_gem_release_mmap(fence->vma->obj);
+ i915_vma_revoke_mmap(fence->vma);
}
}
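The fence paths show the gain from per-vma tracking most directly; a rough before/after of the revocation scope when a fence is stolen (illustrative only):

/*
 *   before: i915_gem_release_mmap(obj)  - zaps every GGTT view of obj
 *   after:  i915_vma_revoke_mmap(vma)   - zaps only the fenced view
 *
 * Any other partial mmap of the same object keeps its PTEs and avoids
 * a needless refault.
 */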
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
/**
* Whether the object is currently in the GGTT mmap.
*/
+ unsigned int userfault_count;
struct list_head userfault_link;
struct list_head batch_pool_link;
vma->iomap = NULL;
}
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+ struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ u64 vma_offset;
+
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+ if (!i915_vma_has_userfault(vma))
+ return;
+
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(!vma->obj->userfault_count);
+
+ vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+ drm_vma_node_offset_addr(node) + vma_offset,
+ vma->size,
+ 1);
+
+ i915_vma_unset_userfault(vma);
+ if (!--vma->obj->userfault_count)
+ list_del(&vma->obj->userfault_link);
+}
+
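A worked example of the range computation in i915_vma_revoke_mmap(), with made-up numbers and assuming PAGE_SHIFT == 12: for an object whose vma_node sits at mmap offset 0x10000000, a partial view with partial.offset == 4 and vma->size == 2 * PAGE_SIZE yields

	start = drm_vma_node_offset_addr(node) + (4 << PAGE_SHIFT)
	      = 0x10000000 + 0x4000
	      = 0x10004000
	len   = 0x2000

so unmap_mapping_range(mapping, 0x10004000, 0x2000, 1) removes the PTEs for just that chunk, leaving any other views of the same object mapped.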
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
return ret;
/* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
+ i915_vma_revoke_mmap(vma);
__i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE;
}
+ GEM_BUG_ON(vma->fence);
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma);
* that exist in the ctx->handle_vmas LUT for this vma.
*/
unsigned int open_count;
- unsigned int flags;
+ unsigned long flags;
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, execbuffer
#define I915_VMA_GGTT BIT(8)
#define I915_VMA_CAN_FENCE BIT(9)
#define I915_VMA_CLOSED BIT(10)
+#define I915_VMA_USERFAULT_BIT 11
+#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
return vma->flags & I915_VMA_CLOSED;
}
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
+{
+ __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
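The double-underscore (non-atomic) bitops are safe here because, as the lockdep assertions elsewhere in the patch suggest, vma->flags is only written under struct_mutex; this is also why flags widens from unsigned int to unsigned long, the word size the kernel bitops operate on. A hypothetical caller sketch (example_mark_faulted is an invented name) mirroring the fault-handler pattern:

static void example_mark_faulted(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/* The old bit value says whether this is the vma's first fault. */
	if (!i915_vma_set_userfault(vma))
		vma->obj->userfault_count++;
}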
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
return vma->active;
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);