*/
struct list_head unbound_list;
+ /** Protects access to the userfault_list */
+ spinlock_t userfault_lock;
+
+ /** List of all objects in gtt_space, currently mmapped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
struct drm_mm_node *stolen;
struct list_head global_list;
+ /**
+ * Link on dev_priv->mm.userfault_list while the object is mmapped
+ * into the GGTT aperture (i.e. user PTEs may point at it).
+ */
+ struct list_head userfault_link;
+
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
*/
unsigned int madv:2;
- /**
- * Whether the current gtt mapping needs to be mappable (and isn't just
- * mappable by accident). Track pin and fault separate for a more
- * accurate mappable working set.
- */
- unsigned int fault_mappable:1;
-
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
if (ret)
goto err_unpin;
+ /* Mark as being mmapped into userspace for later revocation */
+ spin_lock(&dev_priv->mm.userfault_lock);
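+ /* The object may already be on the list from an earlier fault; the
+ * empty link doubles as the "not tracked" state, so only add it once.
+ */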
+ if (list_empty(&obj->userfault_link))
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+ spin_unlock(&dev_priv->mm.userfault_lock);
+
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
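+ /* On error we simply fall through to err_unpin below, so the old
+ * explicit goto is no longer needed.
+ */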
- if (ret)
- goto err_unpin;
- obj->fault_mappable = true;
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ bool zap = false;
+
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
- if (!obj->fault_mappable)
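+ /* A non-empty userfault_link means user PTEs may have been installed
+ * since the last revoke and so must be zapped.
+ */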
+ spin_lock(&i915->mm.userfault_lock);
+ if (!list_empty(&obj->userfault_link)) {
+ list_del_init(&obj->userfault_link);
+ zap = true;
+ }
+ spin_unlock(&i915->mm.userfault_lock);
+ if (!zap)
return;
drm_vma_node_unmap(&obj->base.vma_node,
* memory writes before touching registers / GSM.
*/
wmb();
-
- obj->fault_mappable = false;
}
void
{
struct drm_i915_gem_object *obj;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- i915_gem_release_mmap(obj);
+ spin_lock(&dev_priv->mm.userfault_lock);
+ while ((obj = list_first_entry_or_null(&dev_priv->mm.userfault_list,
+ struct drm_i915_gem_object,
+ userfault_link))) {
+ list_del_init(&obj->userfault_link);
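+ /* drm_vma_node_unmap() may block, so drop the spinlock around it;
+ * the object is already off the list, and the loop restarts from the
+ * head once the lock is retaken.
+ */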
+ spin_unlock(&dev_priv->mm.userfault_lock);
+
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+
+ spin_lock(&dev_priv->mm.userfault_lock);
+ }
+ spin_unlock(&dev_priv->mm.userfault_lock);
}
/**
int i;
INIT_LIST_HEAD(&obj->global_list);
+ INIT_LIST_HEAD(&obj->userfault_link);
for (i = 0; i < I915_NUM_ENGINES; i++)
init_request_active(&obj->last_read[i],
i915_gem_object_retire__read);
int ret;
mutex_lock(&dev->struct_mutex);
+ spin_lock_init(&dev_priv->mm.userfault_lock);
if (!i915.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,