Since we're doing this outside of a spinlock, which would otherwise provide
the necessary barriers, add an explicit barrier so that processes trying to
reserve see the cleared flag.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <asm/atomic.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
	ttm_bo_mem_put(bo, &bo->mem);
	atomic_set(&bo->reserved, 0);
+
+	/*
+	 * Make processes trying to reserve really pick it up.
+	 */
+	smp_mb__after_atomic_dec();
	wake_up_all(&bo->event_queue);
}
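
For context, a minimal userspace sketch of the ordering this hunk adds
(illustrative only, not the TTM code itself): the "reserved" flag is cleared
with a plain atomic store, then a full fence is issued before waiters are
notified, standing in for atomic_set() + smp_mb__after_atomic_dec() +
wake_up_all(). The names reserved, unreserve() and try_reserve() are made
up for this example.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	/* Stand-in for bo->reserved; starts out reserved. */
	static atomic_int reserved = 1;

	/*
	 * Writer side, analogous to the patched function above: clear the
	 * flag with a plain store, then issue a full fence so the cleared
	 * value is visible to other CPUs before any wakeup is sent (the
	 * fence plays the role of smp_mb__after_atomic_dec()).
	 */
	static void unreserve(void)
	{
		atomic_store_explicit(&reserved, 0, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		/* In the kernel, wake_up_all(&bo->event_queue) runs here. */
	}

	/* Waiter side: a process trying to reserve waits for the flag to clear. */
	static void *try_reserve(void *arg)
	{
		(void)arg;
		while (atomic_load_explicit(&reserved, memory_order_relaxed))
			;	/* the real code sleeps on bo->event_queue instead */
		printf("reservation picked up\n");
		return NULL;
	}

	int main(void)
	{
		pthread_t waiter;

		pthread_create(&waiter, NULL, try_reserve, NULL);
		unreserve();
		pthread_join(waiter, NULL);
		return 0;
	}

Build with cc -pthread. Note the spin-waiting reader here would eventually
observe the store even without the fence; the fence matters in the real
code, where the waiter sleeps and the wakeup must not be observed before
the cleared flag.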