1 From c2acd59177043f78446357f560e5436b0318ceb6 Mon Sep 17 00:00:00 2001
2 From: detule <ogjoneski@gmail.com>
3 Date: Tue, 2 Oct 2018 04:10:08 -0400
4 Subject: [PATCH] vchiq_2835_arm: Implement a DMA pool for small bulk transfers
7 During a bulk transfer we request a DMA allocation to hold the
8 scatter-gather list. Most of the time, this allocation is small
9 (<< PAGE_SIZE), however it can be requested at a high enough frequency
10 to cause fragmentation and/or stress the CMA allocator (think time
11 spent in compaction here, or during allocations elsewhere).
13 Implement a pool to serve up small DMA allocations, falling back
14 to a coherent allocation if the request is greater than
15 VCHIQ_DMA_POOL_SIZE (one page).

17 Signed-off-by: Oliver Gjoneski <ogjoneski@gmail.com>
17 Signed-off-by: Oliver Gjoneski <ogjoneski@gmail.com>
19 .../interface/vchiq_arm/vchiq_2835_arm.c | 38 ++++++++++++++++---
20 1 file changed, 33 insertions(+), 5 deletions(-)
22 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
23 +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
25 #include <linux/interrupt.h>
26 #include <linux/pagemap.h>
27 #include <linux/dma-mapping.h>
28 +#include <linux/dmapool.h>
30 #include <linux/platform_device.h>
31 #include <linux/uaccess.h>
36 +#define VCHIQ_DMA_POOL_SIZE PAGE_SIZE
38 struct vchiq_2835_state {
40 struct vchiq_arm_state arm_state;
41 @@ -37,6 +40,7 @@ struct vchiq_pagelist_info {
42 struct pagelist *pagelist;
43 size_t pagelist_buffer_size;
46 enum dma_data_direction dma_dir;
47 unsigned int num_pages;
48 unsigned int pages_need_release;
49 @@ -57,6 +61,7 @@ static void __iomem *g_regs;
52 static unsigned int g_cache_line_size = 32;
53 +static struct dma_pool *g_dma_pool;
54 static unsigned int g_fragments_size;
55 static char *g_fragments_base;
56 static char *g_free_fragments;
57 @@ -161,6 +166,14 @@ int vchiq_platform_init(struct platform_
61 + g_dma_pool = dmam_pool_create("vchiq_scatter_pool", dev,
62 + VCHIQ_DMA_POOL_SIZE, g_cache_line_size,
65 + dev_err(dev, "failed to create dma pool");
69 vchiq_log_info(vchiq_arm_log_level,
70 "vchiq_init - done (slots %pK, phys %pad)",
71 vchiq_slot_zero, &slot_phys);
72 @@ -339,9 +352,14 @@ cleanup_pagelistinfo(struct vchiq_pageli
73 for (i = 0; i < pagelistinfo->num_pages; i++)
74 put_page(pagelistinfo->pages[i]);
77 - dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
78 - pagelistinfo->pagelist, pagelistinfo->dma_addr);
79 + if (pagelistinfo->is_from_pool) {
80 + dma_pool_free(g_dma_pool, pagelistinfo->pagelist,
81 + pagelistinfo->dma_addr);
83 + dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
84 + pagelistinfo->pagelist,
85 + pagelistinfo->dma_addr);
89 /* There is a potential problem with partial cache lines (pages?)
90 @@ -361,6 +379,7 @@ create_pagelist(char __user *buf, size_t
92 unsigned int num_pages, offset, i, k;
96 struct scatterlist *scatterlist, *sg;
98 @@ -387,8 +406,16 @@ create_pagelist(char __user *buf, size_t
99 /* Allocate enough storage to hold the page pointers and the page
102 - pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
104 + if (pagelist_size > VCHIQ_DMA_POOL_SIZE) {
105 + pagelist = dma_alloc_coherent(g_dev,
109 + is_from_pool = false;
111 + pagelist = dma_pool_alloc(g_dma_pool, GFP_KERNEL, &dma_addr);
112 + is_from_pool = true;
115 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
117 @@ -409,6 +436,7 @@ create_pagelist(char __user *buf, size_t
118 pagelistinfo->pagelist = pagelist;
119 pagelistinfo->pagelist_buffer_size = pagelist_size;
120 pagelistinfo->dma_addr = dma_addr;
121 + pagelistinfo->is_from_pool = is_from_pool;
122 pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
123 DMA_TO_DEVICE : DMA_FROM_DEVICE;
124 pagelistinfo->num_pages = num_pages;