-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 64cad3f..008009f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -1654,7 +1654,11 @@ int i915_driver_load(struct drm_device *
+@@ -1657,7 +1657,11 @@ int i915_driver_load(struct drm_device *
return 0;
out_gem_unload:
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
if (dev->pdev->msi_enabled)
-@@ -1685,7 +1689,11 @@ int i915_driver_unload(struct drm_device
+@@ -1691,7 +1695,11 @@ int i915_driver_unload(struct drm_device
i915_teardown_sysfs(dev);
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 29eff1d..7fb1804 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
bool enable);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- static long i915_gem_inactive_count(struct shrinker *shrinker,
- struct shrink_control *sc);
- static long i915_gem_inactive_scan(struct shrinker *shrinker,
- struct shrink_control *sc);
+ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
+#else
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+				    struct shrink_control *sc);
+#endif
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
-@@ -4501,8 +4510,13 @@ static bool mutex_is_locked_by(struct mu
+@@ -4501,8 +4510,14 @@ static bool mutex_is_locked_by(struct mu
#endif
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- static long
+ static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
+#else
++#define SHRINK_STOP -1
+static int
+i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+#endif
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
-@@ -4511,7 +4525,12 @@ i915_gem_inactive_count(struct shrinker
+@@ -4511,7 +4526,12 @@ i915_gem_inactive_count(struct shrinker
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
bool unlock = true;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- long cnt;
+ unsigned long count;
+#else
+ int nr_to_scan = sc->nr_to_scan;
-+ int cnt;
++ int count;
+#endif
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
-@@ -4523,6 +4542,17 @@ i915_gem_inactive_count(struct shrinker
+@@ -4523,6 +4543,17 @@ i915_gem_inactive_count(struct shrinker
unlock = false;
}
+ }
+#endif
+
- cnt = 0;
+ count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
-@@ -4535,6 +4565,8 @@ i915_gem_inactive_count(struct shrinker
- mutex_unlock(&dev->struct_mutex);
- return cnt;
+@@ -4536,6 +4567,7 @@ i915_gem_inactive_count(struct shrinker
+ return count;
}
-+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- static long
+ static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
-@@ -4568,3 +4600,4 @@ i915_gem_inactive_scan(struct shrinker *
+@@ -4569,3 +4601,4 @@ i915_gem_inactive_scan(struct shrinker *
mutex_unlock(&dev->struct_mutex);
return freed;
}
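
[Editor's note: the i915 hunks above follow the usual dual-API shrinker compat
pattern: kernels with the count_objects/scan_objects split use the two new
callbacks unchanged, while older kernels fall back to a single combined
.shrink callback. Below is a minimal sketch of that pattern for reference;
foo_shrinker and the foo_*() helpers are hypothetical stand-ins, not names
from this patch.

#include <linux/version.h>
#include <linux/types.h>
#include <linux/shrinker.h>

/* Hypothetical cache helpers, stand-ins for the driver's real locking
 * and freeing routines; not part of the patch above. */
static unsigned long foo_nr_cached(void);
static unsigned long foo_free(unsigned long nr);
static bool foo_trylock(void);
static void foo_unlock(void);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
/* Split API: count and scan are separate callbacks, both returning
 * unsigned long. */
static unsigned long foo_count_objects(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	return foo_nr_cached();		/* reclaimable objects */
}

static unsigned long foo_scan_objects(struct shrinker *shrinker,
				      struct shrink_control *sc)
{
	unsigned long freed;

	if (!foo_trylock())
		return SHRINK_STOP;	/* can't make progress, back off */
	freed = foo_free(sc->nr_to_scan);
	foo_unlock();
	return freed;			/* objects actually freed */
}

static struct shrinker foo_shrinker = {
	.count_objects = foo_count_objects,
	.scan_objects = foo_scan_objects,
	.seeks = DEFAULT_SEEKS,
};
#else
/* Combined API: one callback; sc->nr_to_scan == 0 means "just count". */
static int foo_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		if (!foo_trylock())
			return -1;	/* old-API equivalent of SHRINK_STOP */
		foo_free(sc->nr_to_scan);
		foo_unlock();
	}
	return foo_nr_cached();		/* remaining reclaimable objects */
}

static struct shrinker foo_shrinker = {
	.shrink = foo_shrink,
	.seeks = DEFAULT_SEEKS,
};
#endif

Registration and teardown are identical in both APIs, via
register_shrinker(&foo_shrinker) and unregister_shrinker(), so only the
callback wiring itself needs the version guard.]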
-diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-index 83058a2..5f5bafe 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-@@ -377,6 +377,11 @@ out:
+@@ -377,6 +377,9 @@ out:
return nr_free;
}
-+static long
-+ttm_pool_shrink_count(
-+ struct shrinker *shrink,
-+ struct shrink_control *sc);
++static unsigned long
++ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc);
+
/**
* Callback for mm to request pool to reduce number of page held.
*
-@@ -388,10 +393,15 @@ out:
+@@ -388,8 +391,13 @@ out:
*
* This code is crying out for a shrinker per pool....
*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- static long
- ttm_pool_shrink_scan(
- struct shrinker *shrink,
- struct shrink_control *sc)
+ static unsigned long
+ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+#else
+static int ttm_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+#endif
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
-@@ -410,7 +420,12 @@ ttm_pool_shrink_scan(
+@@ -408,7 +416,12 @@ ttm_pool_shrink_scan(struct shrinker *sh
shrink_pages = ttm_page_pool_free(pool, nr_free);
freed += nr_free - shrink_pages;
}
}
-@@ -430,8 +445,12 @@ ttm_pool_shrink_count(
+@@ -426,8 +439,12 @@ ttm_pool_shrink_count(struct shrinker *s
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- manager->mm_shrink.count_objects = &ttm_pool_shrink_count;
- manager->mm_shrink.scan_objects = &ttm_pool_shrink_scan;
+ manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
+#else
-+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
++ manager->mm_shrink.shrink = ttm_pool_mm_shrink;
+#endif
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
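
[Editor's note: two details in the ttm_page_alloc.c hunks are worth a word.
The regenerated context drops the `&` from the count_objects/scan_objects
assignments to match the updated upstream file, and the compat-only
`.shrink = ttm_pool_mm_shrink` line is restyled the same way. The forward
declaration of ttm_pool_shrink_count() added above the comment block is
presumably needed because, on pre-3.11 kernels, the combined
ttm_pool_mm_shrink() callback still calls ttm_pool_shrink_count() (which is
defined further down) to report the remaining pool pages.]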
-diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
-index b3b4f99..96e1efb 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
-@@ -987,6 +987,11 @@ void ttm_dma_unpopulate(struct ttm_dma_t
+@@ -987,6 +987,9 @@ void ttm_dma_unpopulate(struct ttm_dma_t
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
-+static long
-+ttm_dma_pool_shrink_count(
-+ struct shrinker *shrink,
-+ struct shrink_control *sc);
++static unsigned long
++ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc);
+
/**
* Callback for mm to request pool to reduce number of page held.
*
-@@ -1000,10 +1005,15 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+@@ -1000,8 +1003,14 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
*/
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- static long
- ttm_dma_pool_shrink_scan(
- struct shrinker *shrink,
- struct shrink_control *sc)
+ static unsigned long
+ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+#else
++#define SHRINK_STOP 0
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+#endif
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
-@@ -1013,7 +1023,11 @@ ttm_dma_pool_shrink_scan(
- long freed = 0;
-
- if (list_empty(&_manager->pools))
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- return -1;
-+#else
-+ return 0;
-+#endif
-
- mutex_lock(&_manager->lock);
- pool_offset = pool_offset % _manager->npools;
-@@ -1036,7 +1050,12 @@ ttm_dma_pool_shrink_scan(
+@@ -1034,7 +1043,12 @@ ttm_dma_pool_shrink_scan(struct shrinker
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
+#endif
}
- static long
-@@ -1056,8 +1075,12 @@ ttm_dma_pool_shrink_count(
+ static unsigned long
+@@ -1052,8 +1066,12 @@ ttm_dma_pool_shrink_count(struct shrinke
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
- manager->mm_shrink.count_objects = &ttm_dma_pool_shrink_count;
+ manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
+#else
-+ manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
++ manager->mm_shrink.shrink = ttm_dma_pool_mm_shrink;
+#endif
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
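
[Editor's note: the two compat definitions of SHRINK_STOP differ on purpose,
presumably to preserve each file's pre-split return convention rather than
the upstream value of (~0UL). The old combined i915 callback used -1 as its
"back off" return when struct_mutex could not be taken, so i915_gem.c defines
SHRINK_STOP as -1, while the old ttm_dma callback simply returned 0 for empty
pools (exactly the -1 to 0 mapping visible in the hunk this update drops
above), so ttm_page_alloc_dma.c defines it as 0. Since each #define sits in
the pre-3.11 #else branch of a single compilation unit, the two values never
conflict.]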