struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
struct drm_i915_private {
struct drm_device drm;
- struct kmem_cache *objects;
- struct kmem_cache *vmas;
- struct kmem_cache *luts;
-
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
return 0;
}
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
-{
- return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
-}
-
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- kmem_cache_free(dev_priv->objects, obj);
-}
-
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
- kmem_cache_shrink(i915->luts);
- kmem_cache_shrink(i915->vmas);
- kmem_cache_shrink(i915->objects);
-
i915_globals_park();
}
list_del(&lut->obj_link);
list_del(&lut->ctx_link);
- kmem_cache_free(i915->luts, lut);
+ i915_lut_handle_free(lut);
__i915_gem_object_release_unless_active(obj);
}
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return ERR_PTR(-ENOMEM);
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err = -ENOMEM;
-
- dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->objects)
- goto err_out;
-
- dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->vmas)
- goto err_objects;
-
- dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
- if (!dev_priv->luts)
- goto err_vmas;
+ int err;
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
return 0;
-
-err_vmas:
- kmem_cache_destroy(dev_priv->vmas);
-err_objects:
- kmem_cache_destroy(dev_priv->objects);
-err_out:
- return err;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
- kmem_cache_destroy(dev_priv->luts);
- kmem_cache_destroy(dev_priv->vmas);
- kmem_cache_destroy(dev_priv->objects);
-
- /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
- rcu_barrier();
-
i915_gemfs_fini(dev_priv);
}
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
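+/*
+ * Driver-wide slab cache for struct i915_lut_handle, replacing the
+ * per-device dev_priv->luts cache.
+ */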
+static struct i915_global_context {
+ struct kmem_cache *slab_luts;
+} global;
+
+struct i915_lut_handle *i915_lut_handle_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
+}
+
+void i915_lut_handle_free(struct i915_lut_handle *lut)
+{
+ kmem_cache_free(global.slab_luts, lut);
+}
+
static void lut_close(struct i915_gem_context *ctx)
{
struct i915_lut_handle *lut, *ln;
list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
list_del(&lut->obj_link);
- kmem_cache_free(ctx->i915->luts, lut);
+ i915_lut_handle_free(lut);
}
rcu_read_lock();
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif
+
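+/* Create the lut slab once at driver load; see i915_globals_init(). */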
+int __init i915_global_context_init(void)
+{
+ global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+ if (!global.slab_luts)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void i915_global_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_luts);
+}
+
+void i915_global_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_luts);
+}
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
+#include "intel_ringbuffer.h"
struct pid;
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
+struct i915_lut_handle *i915_lut_handle_alloc(void);
+void i915_lut_handle_free(struct i915_lut_handle *lut);
+
+int i915_global_context_init(void);
+void i915_global_context_shrink(void);
+void i915_global_context_exit(void);
+
#endif /* !__I915_GEM_CONTEXT_H__ */
get_dma_buf(dma_buf);
- obj = i915_gem_object_alloc(to_i915(dev));
+ obj = i915_gem_object_alloc();
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
goto err_obj;
}
- lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
+ lut = i915_lut_handle_alloc();
if (unlikely(!lut)) {
err = -ENOMEM;
goto err_obj;
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
- kmem_cache_free(eb->i915->luts, lut);
+ i915_lut_handle_free(lut);
goto err_obj;
}
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(size > ggtt->vm.total);
- vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (!vma)
return ERR_PTR(-ENOMEM);
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
#include "i915_drv.h"
#include "i915_gem_object.h"
+static struct i915_global_object {
+ struct kmem_cache *slab_objects;
+} global;
+
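+/* Allocate/free from the global slab; callers no longer pass a dev_priv. */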
+struct drm_i915_gem_object *i915_gem_object_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ kmem_cache_free(global.slab_objects, obj);
+}
+
/**
* Mark up the object's coherency levels for a given cache_level
* @obj: #drm_i915_gem_object
obj->cache_dirty =
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+
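+/* Object slab setup/teardown; the shrink hook trims the cache to reduce fragmentation. */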
+int __init i915_global_objects_init(void)
+{
+ global.slab_objects =
+ KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_objects)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void i915_global_objects_shrink(void)
+{
+ kmem_cache_shrink(global.slab_objects);
+}
+
+void i915_global_objects_exit(void)
+{
+ kmem_cache_destroy(global.slab_objects);
+}
return container_of(gem, struct drm_i915_gem_object, base);
}
+struct drm_i915_gem_object *i915_gem_object_alloc(void);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
* @filp: DRM file private data
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-#endif
+int i915_global_objects_init(void);
+void i915_global_objects_shrink(void);
+void i915_global_objects_exit(void);
+#endif
struct drm_i915_gem_object *obj;
unsigned int cache_level;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
return -ENODEV;
}
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
#include <linux/workqueue.h>
#include "i915_active.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
+#include "i915_vma.h"
int __init i915_globals_init(void)
{
if (err)
return err;
- err = i915_global_request_init();
+ err = i915_global_context_init();
if (err)
goto err_active;
+ err = i915_global_objects_init();
+ if (err)
+ goto err_context;
+
+ err = i915_global_request_init();
+ if (err)
+ goto err_objects;
+
err = i915_global_scheduler_init();
if (err)
goto err_request;
+ err = i915_global_vma_init();
+ if (err)
+ goto err_scheduler;
+
return 0;
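+ /* Unwind the already-initialised globals in reverse order. */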
+err_scheduler:
+ i915_global_scheduler_exit();
err_request:
i915_global_request_exit();
+err_objects:
+ i915_global_objects_exit();
+err_context:
+ i915_global_context_exit();
err_active:
i915_global_active_exit();
return err;
* with the aim of reducing fragmentation.
*/
i915_global_active_shrink();
+ i915_global_context_shrink();
+ i915_global_objects_shrink();
i915_global_request_shrink();
i915_global_scheduler_shrink();
+ i915_global_vma_shrink();
}
static atomic_t active;
rcu_barrier();
flush_scheduled_work();
+ i915_global_vma_exit();
i915_global_scheduler_exit();
i915_global_request_exit();
+ i915_global_objects_exit();
+ i915_global_context_exit();
i915_global_active_exit();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
#include <drm/drm_gem.h>
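+/*
+ * Driver-wide slab cache for struct i915_vma, replacing the per-device
+ * dev_priv->vmas cache.
+ */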
+static struct i915_global_vma {
+ struct kmem_cache *slab_vmas;
+} global;
+
+struct i915_vma *i915_vma_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
+}
+
+void i915_vma_free(struct i915_vma *vma)
+{
+ kmem_cache_free(global.slab_vmas, vma);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
#include <linux/stackdepot.h>
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
- vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
cmp = i915_vma_compare(pos, vm, view);
if (cmp == 0) {
spin_unlock(&obj->vma.lock);
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return pos;
}
return vma;
err_vma:
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return ERR_PTR(-E2BIG);
}
static void __i915_vma_destroy(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
i915_active_fini(&vma->active);
- kmem_cache_free(i915->vmas, vma);
+ i915_vma_free(vma);
}
void i915_vma_destroy(struct i915_vma *vma)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
+
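+/* Create, shrink and destroy the driver-wide vma slab. */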
+int __init i915_global_vma_init(void)
+{
+ global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_vmas)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void i915_global_vma_shrink(void)
+{
+ kmem_cache_shrink(global.slab_vmas);
+}
+
+void i915_global_vma_exit(void)
+{
+ kmem_cache_destroy(global.slab_vmas);
+}
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
+struct i915_vma *i915_vma_alloc(void);
+void i915_vma_free(struct i915_vma *vma);
+
+int i915_global_vma_init(void);
+void i915_global_vma_shrink(void);
+void i915_global_vma_exit(void);
+
#endif
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
goto err;
destroy_workqueue(i915->wq);
- kmem_cache_destroy(i915->vmas);
- kmem_cache_destroy(i915->objects);
-
i915_gemfs_fini(i915);
drm_mode_config_cleanup(&i915->drm);
i915->gt.awake = true;
- i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
- if (!i915->objects)
- goto err_wq;
-
- i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!i915->vmas)
- goto err_objects;
-
i915_timelines_init(i915);
INIT_LIST_HEAD(&i915->gt.active_rings);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
- kmem_cache_destroy(i915->vmas);
-err_objects:
- kmem_cache_destroy(i915->objects);
-err_wq:
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);