From 9dc454ebc4380cd90c24a3c224bb0ac7b3d9cc29 Mon Sep 17 00:00:00 2001
From: Gurchetan Singh <gurchetansingh@chromium.org>
Date: Mon, 2 Dec 2019 17:36:27 -0800
Subject: [PATCH] udmabuf: implement begin_cpu_access/end_cpu_access
 hooks

Commit 284562e1f34874e267d4f499362c3816f8f6bc3f upstream.

With the misc device, we should end up using the result of
get_arch_dma_ops(..) or dma-direct ops.

This can allow us to have WC mappings in the guest after
synchronization.

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20191203013627.85991-4-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/dma-buf/udmabuf.c | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -18,6 +18,7 @@ static const size_t size_limit_mb = 64;
 struct udmabuf {
 	pgoff_t pagecount;
 	struct page **pages;
+	struct sg_table *sg;
 	struct miscdevice *device;
 };
 
@@ -98,20 +99,58 @@ static void unmap_udmabuf(struct dma_buf
 static void release_udmabuf(struct dma_buf *buf)
 {
 	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
 	pgoff_t pg;
 
+	if (ubuf->sg)
+		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		put_page(ubuf->pages[pg]);
 	kfree(ubuf->pages);
 	kfree(ubuf);
 }
 
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+			     enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg) {
+		ubuf->sg = get_sg_table(dev, buf, direction);
+		if (IS_ERR(ubuf->sg))
+			return PTR_ERR(ubuf->sg);
+	} else {
+		dma_sync_sg_for_device(dev, ubuf->sg->sgl,
+				       ubuf->sg->nents,
+				       direction);
+	}
+
+	return 0;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+			   enum dma_data_direction direction)
+{
+	struct udmabuf *ubuf = buf->priv;
+	struct device *dev = ubuf->device->this_device;
+
+	if (!ubuf->sg)
+		return -EINVAL;
+
+	dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+	return 0;
+}
+
 static const struct dma_buf_ops udmabuf_ops = {
 	.cache_sgt_mapping = true,
 	.map_dma_buf = map_udmabuf,
 	.unmap_dma_buf = unmap_udmabuf,
 	.release = release_udmabuf,
 	.mmap = mmap_udmabuf,
+	.begin_cpu_access = begin_cpu_udmabuf,
+	.end_cpu_access = end_cpu_udmabuf,
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
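
For reference, the hooks above are reached from userspace through the
standard DMA_BUF_IOCTL_SYNC ioctl on the exported dma-buf fd:
DMA_BUF_SYNC_START ends up in begin_cpu_udmabuf(), DMA_BUF_SYNC_END in
end_cpu_udmabuf(). Below is a minimal, hypothetical sketch of that flow,
not part of the upstream commit; it assumes CONFIG_UDMABUF is enabled
and /dev/udmabuf is accessible, and it omits most error handling.

/*
 * Hypothetical usage sketch: create a udmabuf from a sealed memfd,
 * then bracket CPU access with DMA_BUF_IOCTL_SYNC so the
 * begin_cpu_access/end_cpu_access hooks added above are invoked.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>

#define BUF_SIZE (16 * 4096)	/* must be a multiple of the page size */

int main(void)
{
	struct udmabuf_create create = { 0 };
	struct dma_buf_sync sync = { 0 };
	int devfd, memfd, buffd;
	void *map;

	devfd = open("/dev/udmabuf", O_RDWR);	/* needs CONFIG_UDMABUF */
	memfd = memfd_create("udmabuf-demo", MFD_ALLOW_SEALING);
	ftruncate(memfd, BUF_SIZE);
	/* udmabuf insists on F_SEAL_SHRINK on the backing memfd */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	create.memfd  = memfd;
	create.offset = 0;
	create.size   = BUF_SIZE;
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);	/* returns a dma-buf fd */
	if (buffd < 0)
		return 1;

	map = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, buffd, 0);
	if (map == MAP_FAILED)
		return 1;

	/* DMA_BUF_SYNC_START -> dma_buf_begin_cpu_access() -> begin_cpu_udmabuf() */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(map, 0xab, BUF_SIZE);	/* CPU access happens here */

	/* DMA_BUF_SYNC_END -> dma_buf_end_cpu_access() -> end_cpu_udmabuf() */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(buffd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(map, BUF_SIZE);
	close(buffd);
	close(memfd);
	close(devfd);
	return 0;
}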