#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
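+
+/* Map an order to its index in orders[]; asking for an order with no
+ * pool is a programming error, hence the BUG(). */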
+static int order_to_index(unsigned int order)
+{
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
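+/* Size in bytes of an allocation of the given page order. */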
+static unsigned int order_to_size(unsigned int order)
+{
+ return PAGE_SIZE << order;
+}
+
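+/* One uncached page pool per entry in orders[]. */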
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
+
struct page_info {
struct page *page;
- unsigned long order;
+ unsigned int order;
struct list_head list;
};
-static unsigned int orders[] = {8, 4, 0};
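+/*
+ * Uncached allocations come from the per-order pool so their pages
+ * stay out of the CPU cache between uses; cached allocations go
+ * straight to the page allocator.
+ */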
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached)
+ page = ion_page_pool_alloc(pool);
+ else
+ page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN | __GFP_NORETRY, order);
+ if (!page)
+ return NULL;
+ if (split_pages)
+ split_page(page, order);
+ return page;
+}
+
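+/*
+ * Counterpart to alloc_buffer_page(): uncached pages are zeroed and
+ * returned to their pool, cached pages go back to the page allocator.
+ */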
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
+ int i;
+
+ if (!cached) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ /* Zero the pages before returning them to the pool for
+ security. This uses vmap as we want to set the pgprot so
+ that the writes occur on noncached mappings, as the pool's
+ purpose is to keep the pages out of the cache. */
+ for (i = 0; i < (1 << order); i++) {
+ struct page *sub_page = page + i;
+ void *addr = vmap(&sub_page, 1, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (addr) {
+ memset(addr, 0, PAGE_SIZE);
+ vunmap(addr);
+ }
+ }
+ ion_page_pool_free(pool, page);
+ } else if (split_pages) {
+ for (i = 0; i < (1 << order); i++)
+ __free_page(page + i);
+ } else {
+ __free_pages(page, order);
+ }
+}
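+
+/*
+ * Allocate the largest chunk of pages, no larger than max_order, that
+ * still fits in size; the caller loops until the buffer is filled.
+ */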
-static struct page_info *alloc_largest_available(unsigned long size,
- bool split_pages,
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
unsigned int max_order)
{
struct page *page;
struct page_info *info;
int i;
- for (i = 0; i < ARRAY_SIZE(orders); i++) {
- if (size < (1 << orders[i]) * PAGE_SIZE)
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
continue;
- page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
- __GFP_NOWARN | __GFP_NORETRY, orders[i]);
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
if (!page)
continue;
- if (split_pages)
- split_page(page, orders[i]);
- info = kmalloc(sizeof(struct page_info *), GFP_KERNEL);
+
+ info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+ if (!info) {
+ free_buffer_page(heap, buffer, page, orders[i]);
+ return NULL;
+ }
info->page = page;
info->order = orders[i];
return info;
unsigned long size, unsigned long align,
unsigned long flags)
{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
struct sg_table *table;
struct scatterlist *sg;
int ret;
struct page_info *info, *tmp_info;
int i = 0;
long size_remaining = PAGE_ALIGN(size);
- bool split_pages = ion_buffer_fault_user_mappings(buffer);
-
-
unsigned int max_order = orders[0];
+ bool split_pages = ion_buffer_fault_user_mappings(buffer);
INIT_LIST_HEAD(&pages);
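+ /* Grab the largest chunks first so big buffers use as few
+ scatterlist entries as possible; any failure unwinds through
+ free_buffer_page() in the err path below. */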
while (size_remaining > 0) {
- info = alloc_largest_available(size_remaining, split_pages,
- max_order);
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
+ max_order);
if (!info)
goto err;
list_add_tail(&info->list, &pages);
sg = table->sgl;
list_for_each_entry_safe(info, tmp_info, &pages, list) {
struct page *page = info->page;
-
if (split_pages) {
for (i = 0; i < (1 << info->order); i++) {
sg_set_page(sg, page + i, PAGE_SIZE, 0);
kfree(table);
err:
- list_for_each_entry(info, &pages, list) {
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
- if (split_pages)
- for (i = 0; i < (1 << info->order); i++)
- __free_page(info->page + i);
- else
- __free_pages(info->page, info->order);
-
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
kfree(info);
}
return -ENOMEM;
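+
+/*
+ * Free each chunk through free_buffer_page() so uncached pages are
+ * zeroed and recycled into the per-order pools instead of being
+ * returned to the system.
+ */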
void ion_system_heap_free(struct ion_buffer *buffer)
{
- int i;
- struct scatterlist *sg;
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
for_each_sg(table->sgl, sg, table->nents, i)
- __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
- if (buffer->sg_table)
- sg_free_table(buffer->sg_table);
- kfree(buffer->sg_table);
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg_dma_len(sg)));
+ sg_free_table(table);
+ kfree(table);
}
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
return 0;
}
-static struct ion_heap_ops vmalloc_ops = {
+static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
.map_dma = ion_system_heap_map_dma,
.map_user = ion_system_heap_map_user,
};
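+
+/* Report how many pages each per-order pool is holding; hooked up
+ * below as the heap's debug_show for ion's debugfs. */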
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ seq_printf(s, "%d order %u pages in pool = %lu total\n",
+ pool->count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->count);
+ }
+ return 0;
+}
+
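+/*
+ * Create the system heap and one uncached page pool per order in
+ * orders[]; unwind everything if any pool fails to create.
+ */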
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
- struct ion_heap *heap;
+ struct ion_system_heap *heap;
+ int i;
- heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
- heap->ops = &vmalloc_ops;
- heap->type = ION_HEAP_TYPE_SYSTEM;
- return heap;
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO |
+ __GFP_NOWARN | __GFP_NORETRY,
+ orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+ heap->heap.debug_show = ion_system_heap_debug_show;
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
}
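+
+/* Destroy the per-order pools before freeing the heap itself. */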
void ion_system_heap_destroy(struct ion_heap *heap)
{
- kfree(heap);
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
}
static int ion_system_contig_heap_allocate(struct ion_heap *heap,