#include <linux/device.h>
#include <linux/file.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
+#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+static bool ion_heap_drain_freelist(struct ion_heap *heap);
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
if (ret) {
- kfree(buffer);
- return ERR_PTR(ret);
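+ /*
+ * The failure may only mean memory is still tied up in
+ * buffers waiting for deferred free; drain the free list
+ * and retry the allocation once.
+ */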
+ if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+ goto err2;
+
+ ion_heap_drain_freelist(heap);
+ ret = heap->ops->allocate(heap, buffer, len, align,
+ flags);
+ if (ret)
+ goto err2;
}
buffer->dev = dev;
err:
heap->ops->unmap_dma(heap, buffer);
heap->ops->free(buffer);
+err2:
kfree(buffer);
return ERR_PTR(ret);
}
-static void ion_buffer_destroy(struct kref *kref)
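+/*
+ * Do the actual teardown: unmap and free the heap memory, then the
+ * buffer metadata.  Called either inline from ion_buffer_destroy() or
+ * later from the deferred-free path; the buffer must already have been
+ * removed from the device's rbtree.
+ */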
+static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
- struct ion_device *dev = buffer->dev;
-
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
- mutex_lock(&dev->buffer_lock);
- rb_erase(&buffer->node, &dev->buffers);
- mutex_unlock(&dev->buffer_lock);
if (buffer->flags & ION_FLAG_CACHED)
kfree(buffer->dirty);
kfree(buffer);
}
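+/*
+ * kref release handler: unlink the buffer from the device's rbtree,
+ * then either queue it for the heap's deferred-free thread or destroy
+ * it immediately.
+ */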
+static void ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_heap *heap = buffer->heap;
+ struct ion_device *dev = buffer->dev;
+
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+ rt_mutex_lock(&heap->lock);
+ list_add(&buffer->list, &heap->free_list);
+ rt_mutex_unlock(&heap->lock);
+ wake_up(&heap->waitqueue);
+ return;
+ }
+ _ion_buffer_destroy(buffer);
+}
+
static void ion_buffer_get(struct ion_buffer *buffer)
{
kref_get(&buffer->ref);
.release = single_release,
};
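+/* Snapshot of whether the heap's free list is empty, taken under heap->lock. */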
+static bool ion_heap_free_list_is_empty(struct ion_heap *heap)
+{
+ bool is_empty;
+
+ rt_mutex_lock(&heap->lock);
+ is_empty = list_empty(&heap->free_list);
+ rt_mutex_unlock(&heap->lock);
+
+ return is_empty;
+}
+
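+/*
+ * Body of the per-heap deferred-free thread: sleep (freezably) until
+ * the free list has entries, then pop and destroy buffers one at a
+ * time, dropping heap->lock before each destroy.
+ */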
+static int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ !ion_heap_free_list_is_empty(heap));
+
+ rt_mutex_lock(&heap->lock);
+ if (list_empty(&heap->free_list)) {
+ rt_mutex_unlock(&heap->lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ rt_mutex_unlock(&heap->lock);
+ _ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
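+/*
+ * Synchronously free everything queued on the heap's free list.  Used
+ * to reclaim memory when an allocation fails, before it is retried.
+ * Returns true if the list had anything to drain.
+ */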
+static bool ion_heap_drain_freelist(struct ion_heap *heap)
+{
+ struct ion_buffer *buffer, *tmp;
+
+ if (ion_heap_free_list_is_empty(heap))
+ return false;
+ rt_mutex_lock(&heap->lock);
+ list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+ list_del(&buffer->list);
+ _ion_buffer_destroy(buffer);
+ }
+ BUG_ON(!list_empty(&heap->free_list));
+ rt_mutex_unlock(&heap->lock);
+
+ return true;
+}
+
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
+ struct sched_param param = { .sched_priority = 0 };
+
if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
!heap->ops->unmap_dma)
pr_err("%s: can not add heap with invalid ops struct.\n",
__func__);
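+ /*
+ * Deferred-free heaps get a free list and a SCHED_IDLE thread
+ * to drain it, so frees happen in the background at the lowest
+ * possible priority.
+ */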
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+ INIT_LIST_HEAD(&heap->free_list);
+ rt_mutex_init(&heap->lock);
+ init_waitqueue_head(&heap->waitqueue);
+ heap->task = kthread_run(ion_heap_deferred_free, heap,
+ "%s", heap->name);
+ if (IS_ERR(heap->task))
+ pr_err("%s: creating thread for deferred free failed\n",
+ __func__);
+ else
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
+ }
+
heap->dev = dev;
down_write(&dev->lock);
/* use negative heap->id to reverse the priority -- when traversing
*/
struct ion_buffer {
struct kref ref;
- struct rb_node node;
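+ /* a buffer sits on the device rbtree or a heap free list, never both */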
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
struct vm_area_struct *vma);
};
+/**
+ * heap flags - flags between the heaps and core ion code
+ * @ION_HEAP_FLAG_DEFER_FREE: buffers for this heap are freed on a
+ * per-heap kernel thread rather than in the caller's context
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
* @type: type of heap
* @ops: ops struct as above
+ * @flags: ION_HEAP_FLAG_* flags for this heap
* @id: id of heap, also indicates priority of this heap when
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
+ * @free_list: free list head if deferred free is used
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
* @debug_show: called when heap debug file is read to add any
* heap specific debug info to output
*
struct ion_device *dev;
enum ion_heap_type type;
struct ion_heap_ops *ops;
+ unsigned long flags;
unsigned int id;
const char *name;
+ struct list_head free_list;
+ struct rt_mutex lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};