#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_globals.h"
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
* nodes from a local slab cache to hopefully reduce the fragmentation.
*/
static struct i915_global_active {
+ struct i915_global base;
struct kmem_cache *slab_cache;
} global;
#include "selftests/i915_active.c"
#endif
-int __init i915_global_active_init(void)
+static void i915_global_active_shrink(void)
{
- global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
- if (!global.slab_cache)
- return -ENOMEM;
-
- return 0;
+ kmem_cache_shrink(global.slab_cache);
}
-void i915_global_active_shrink(void)
+static void i915_global_active_exit(void)
{
- kmem_cache_shrink(global.slab_cache);
+ kmem_cache_destroy(global.slab_cache);
}
-void i915_global_active_exit(void)
+/*
+ * Hook the active-node slab cache into the i915_global shrink/exit
+ * machinery; actual registration happens via i915_global_register()
+ * in i915_global_active_init().
+ */
+static struct i915_global_active global = { {
+ .shrink = i915_global_active_shrink,
+ .exit = i915_global_active_exit,
+} };
+
+int __init i915_global_active_init(void)
{
- kmem_cache_destroy(global.slab_cache);
+ global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_cache)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
}
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
-int i915_global_active_init(void);
-void i915_global_active_shrink(void);
-void i915_global_active_exit(void);
-
#endif /* _I915_ACTIVE_H_ */
#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_trace.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
static struct i915_global_context {
+ struct i915_global base;
struct kmem_cache *slab_luts;
} global;
#include "selftests/i915_gem_context.c"
#endif
-int __init i915_global_context_init(void)
+static void i915_global_context_shrink(void)
{
- global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
- if (!global.slab_luts)
- return -ENOMEM;
-
- return 0;
+ kmem_cache_shrink(global.slab_luts);
}
-void i915_global_context_shrink(void)
+static void i915_global_context_exit(void)
{
- kmem_cache_shrink(global.slab_luts);
+ kmem_cache_destroy(global.slab_luts);
}
-void i915_global_context_exit(void)
+/*
+ * Hook the context LUT slab cache into the i915_global shrink/exit
+ * machinery; registered via i915_global_register() in
+ * i915_global_context_init().
+ */
+static struct i915_global_context global = { {
+ .shrink = i915_global_context_shrink,
+ .exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
{
- kmem_cache_destroy(global.slab_luts);
+ global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+ if (!global.slab_luts)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
}
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
-int i915_global_context_init(void);
-void i915_global_context_shrink(void);
-void i915_global_context_exit(void);
-
#endif /* !__I915_GEM_CONTEXT_H__ */
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_globals.h"
static struct i915_global_object {
+ struct i915_global base;
struct kmem_cache *slab_objects;
} global;
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+/* Trim empty slabs from the object cache to reduce fragmentation. */
+static void i915_global_objects_shrink(void)
+{
+ kmem_cache_shrink(global.slab_objects);
+}
+
+/* Destroy the object slab cache on module unload. */
+static void i915_global_objects_exit(void)
+{
+ kmem_cache_destroy(global.slab_objects);
+}
+
+/*
+ * Shrink/exit callbacks for the object slab; registered with the
+ * i915_global list in i915_global_objects_init().
+ */
+static struct i915_global_object global = { {
+ .shrink = i915_global_objects_shrink,
+ .exit = i915_global_objects_exit,
+} };
+
int __init i915_global_objects_init(void)
{
global.slab_objects =
if (!global.slab_objects)
return -ENOMEM;
+ i915_global_register(&global.base);
return 0;
}
-
-void i915_global_objects_shrink(void)
-{
- kmem_cache_shrink(global.slab_objects);
-}
-
-void i915_global_objects_exit(void)
-{
- kmem_cache_destroy(global.slab_objects);
-}
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-int i915_global_objects_init(void);
-void i915_global_objects_shrink(void);
-void i915_global_objects_exit(void);
-
#endif
#include "i915_scheduler.h"
#include "i915_vma.h"
-int __init i915_globals_init(void)
+static LIST_HEAD(globals);
+
+void __init i915_global_register(struct i915_global *global)
{
- int err;
+ GEM_BUG_ON(!global->shrink);
+ GEM_BUG_ON(!global->exit);
- err = i915_global_active_init();
- if (err)
- return err;
+ list_add_tail(&global->link, &globals);
+}
- err = i915_global_context_init();
- if (err)
- goto err_active;
+static void __i915_globals_cleanup(void)
+{
+ struct i915_global *global, *next;
- err = i915_global_objects_init();
- if (err)
- goto err_context;
+ list_for_each_entry_safe_reverse(global, next, &globals, link)
+ global->exit();
+}
- err = i915_global_request_init();
- if (err)
- goto err_objects;
+/*
+ * Constructors for each set of global slab caches, invoked in order by
+ * i915_globals_init(). Each constructor registers its i915_global on
+ * the global list, so a failure part-way through is unwound by
+ * __i915_globals_cleanup() walking the already-registered entries in
+ * reverse. __initconst: the table itself is discarded after init.
+ */
+static __initconst int (* const initfn[])(void) = {
+ i915_global_active_init,
+ i915_global_context_init,
+ i915_global_objects_init,
+ i915_global_request_init,
+ i915_global_scheduler_init,
+ i915_global_vma_init,
+};
- err = i915_global_scheduler_init();
- if (err)
- goto err_request;
+int __init i915_globals_init(void)
+{
+ int i;
- err = i915_global_vma_init();
- if (err)
- goto err_scheduler;
+ for (i = 0; i < ARRAY_SIZE(initfn); i++) {
+ int err;
- return 0;
+ err = initfn[i]();
+ if (err) {
+ __i915_globals_cleanup();
+ return err;
+ }
+ }
-err_scheduler:
- i915_global_scheduler_exit();
-err_request:
- i915_global_request_exit();
-err_objects:
- i915_global_objects_exit();
-err_context:
- i915_global_context_exit();
-err_active:
- i915_global_active_exit();
- return err;
+ return 0;
}
static void i915_globals_shrink(void)
{
+ struct i915_global *global;
+
/*
* kmem_cache_shrink() discards empty slabs and reorders partially
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
- i915_global_active_shrink();
- i915_global_context_shrink();
- i915_global_objects_shrink();
- i915_global_request_shrink();
- i915_global_scheduler_shrink();
- i915_global_vma_shrink();
+ list_for_each_entry(global, &globals, link)
+ global->shrink();
}
static atomic_t active;
rcu_barrier();
flush_scheduled_work();
- i915_global_vma_exit();
- i915_global_scheduler_exit();
- i915_global_request_exit();
- i915_global_objects_exit();
- i915_global_context_exit();
- i915_global_active_exit();
+ __i915_globals_cleanup();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();
#ifndef _I915_GLOBALS_H_
#define _I915_GLOBALS_H_
+typedef void (*i915_global_func_t)(void);
+
+/*
+ * A registered owner of global slab caches. Embed in a per-cache
+ * wrapper and call i915_global_register(); the core then invokes
+ * @shrink (in registration order) to trim empty slabs, and @exit
+ * (in reverse registration order) to destroy the caches.
+ */
+struct i915_global {
+ struct list_head link;
+
+ i915_global_func_t shrink;
+ i915_global_func_t exit;
+};
+
+/*
+ * NOTE(review): the definition is annotated __init, so this must only
+ * be called during driver initialisation — confirm all callers are
+ * themselves __init.
+ */
+void i915_global_register(struct i915_global *global);
+
int i915_globals_init(void);
void i915_globals_park(void);
void i915_globals_unpark(void);
void i915_globals_exit(void);
+/* constructors — one per global slab-cache owner; each registers its
+ * i915_global with i915_global_register() on success
+ */
+int i915_global_active_init(void);
+int i915_global_context_init(void);
+int i915_global_objects_init(void);
+int i915_global_request_init(void);
+int i915_global_scheduler_init(void);
+int i915_global_vma_init(void);
+
#endif /* _I915_GLOBALS_H_ */
#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_globals.h"
#include "i915_reset.h"
struct execute_cb {
};
static struct i915_global_request {
+ struct i915_global base;
struct kmem_cache *slab_requests;
struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_execute_cbs;
#include "selftests/i915_request.c"
#endif
+/* Trim empty slabs from all three request-related caches. */
+static void i915_global_request_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_execute_cbs);
+ kmem_cache_shrink(global.slab_requests);
+}
+
+/*
+ * Destroy the request caches in reverse order of their creation in
+ * i915_global_request_init() (requests, execute_cbs, dependencies).
+ */
+static void i915_global_request_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_execute_cbs);
+ kmem_cache_destroy(global.slab_requests);
+}
+
+/*
+ * Shrink/exit callbacks for the request slabs; registered with the
+ * i915_global list in i915_global_request_init().
+ */
+static struct i915_global_request global = { {
+ .shrink = i915_global_request_shrink,
+ .exit = i915_global_request_exit,
+} };
+
int __init i915_global_request_init(void)
{
global.slab_requests = KMEM_CACHE(i915_request,
if (!global.slab_dependencies)
goto err_execute_cbs;
+ i915_global_register(&global.base);
return 0;
err_execute_cbs:
kmem_cache_destroy(global.slab_requests);
return -ENOMEM;
}
-
-void i915_global_request_shrink(void)
-{
- kmem_cache_shrink(global.slab_dependencies);
- kmem_cache_shrink(global.slab_execute_cbs);
- kmem_cache_shrink(global.slab_requests);
-}
-
-void i915_global_request_exit(void)
-{
- kmem_cache_destroy(global.slab_dependencies);
- kmem_cache_destroy(global.slab_execute_cbs);
- kmem_cache_destroy(global.slab_requests);
-}
void i915_retire_requests(struct drm_i915_private *i915);
-int i915_global_request_init(void);
-void i915_global_request_shrink(void);
-void i915_global_request_exit(void);
-
#endif /* I915_REQUEST_H */
#include <linux/mutex.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
static struct i915_global_scheduler {
+ struct i915_global base;
struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_priorities;
} global;
kmem_cache_free(global.slab_priorities, p);
}
+/* Trim empty slabs from the scheduler caches. */
+static void i915_global_scheduler_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_priorities);
+}
+
+/* Destroy the scheduler slab caches on module unload. */
+static void i915_global_scheduler_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_priorities);
+}
+
+/*
+ * Shrink/exit callbacks for the scheduler slabs; registered with the
+ * i915_global list in i915_global_scheduler_init().
+ */
+static struct i915_global_scheduler global = { {
+ .shrink = i915_global_scheduler_shrink,
+ .exit = i915_global_scheduler_exit,
+} };
+
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
if (!global.slab_priorities)
goto err_priorities;
+ i915_global_register(&global.base);
return 0;
err_priorities:
kmem_cache_destroy(global.slab_priorities);
return -ENOMEM;
}
-
-void i915_global_scheduler_shrink(void)
-{
- kmem_cache_shrink(global.slab_dependencies);
- kmem_cache_shrink(global.slab_priorities);
-}
-
-void i915_global_scheduler_exit(void)
-{
- kmem_cache_destroy(global.slab_dependencies);
- kmem_cache_destroy(global.slab_priorities);
-}
__i915_priolist_free(p);
}
-int i915_global_scheduler_init(void);
-void i915_global_scheduler_shrink(void);
-void i915_global_scheduler_exit(void);
-
#endif /* _I915_SCHEDULER_H_ */
#include "i915_vma.h"
#include "i915_drv.h"
+#include "i915_globals.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
static struct i915_global_vma {
+ struct i915_global base;
struct kmem_cache *slab_vmas;
} global;
#include "selftests/i915_vma.c"
#endif
-int __init i915_global_vma_init(void)
+static void i915_global_vma_shrink(void)
{
- global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!global.slab_vmas)
- return -ENOMEM;
-
- return 0;
+ kmem_cache_shrink(global.slab_vmas);
}
-void i915_global_vma_shrink(void)
+static void i915_global_vma_exit(void)
{
- kmem_cache_shrink(global.slab_vmas);
+ kmem_cache_destroy(global.slab_vmas);
}
-void i915_global_vma_exit(void)
+/*
+ * Hook the vma slab cache into the i915_global shrink/exit machinery;
+ * registered via i915_global_register() in i915_global_vma_init().
+ */
+static struct i915_global_vma global = { {
+ .shrink = i915_global_vma_shrink,
+ .exit = i915_global_vma_exit,
+} };
+
+int __init i915_global_vma_init(void)
{
- kmem_cache_destroy(global.slab_vmas);
+ global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_vmas)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
}
struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);
-int i915_global_vma_init(void);
-void i915_global_vma_shrink(void);
-void i915_global_vma_exit(void);
-
#endif