From 6f2f0f6465acbd59391c43352ff0df77df1f01db Mon Sep 17 00:00:00 2001
From: Abhishek Sahu <absahu@codeaurora.org>
Date: Mon, 12 Mar 2018 18:44:59 +0530
Subject: [PATCH 10/13] i2c: qup: fix buffer overflow for multiple msg of
 maximum xfer len
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

BAM mode requires a buffer for the start tag data and for the TX and
RX SG lists. Currently, these are sized for a single transfer of the
maximum length (64 KiB). But an I2C transfer can consist of multiple
messages, and each message can itself be of this maximum length, so
a buffer overflow will happen in that case. Simply increasing the
buffer length is not feasible, since an I2C transfer can contain any
number of messages. This patch therefore makes the following changes
to support transfers consisting of multiple messages:

1. Allocate the buffers for 2 maximum-length messages (64 KiB * 2).
2. Split descriptor formation from descriptor scheduling. The idea
   is to fit as many messages as possible into one DMA transfer, up
   to the threshold value (max_xfer_sg_len). Whenever sg_cnt crosses
   this threshold, schedule the BAM transfer; the subsequent
   transfer then starts again from zero (see the sketch below).
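
For illustration, the batching logic added to qup_i2c_bam_xfer()
boils down to the accumulate-and-flush loop sketched below. This is
a minimal, standalone C sketch of the pattern only, not driver code:
the state is reduced to the counters the loop needs, and make_desc()
and flush_and_reset() are hypothetical stand-ins for
qup_i2c_bam_make_desc() and for qup_i2c_bam_schedule_desc() followed
by qup_i2c_bam_clear_tag_buffers().

  #include <stdio.h>

  struct xfer_state {
          unsigned int sg_cnt;          /* SG entries built so far */
          unsigned int max_xfer_sg_len; /* entries for one max-length msg */
  };

  /* Stand-in for qup_i2c_bam_make_desc(): only builds descriptors. */
  static int make_desc(struct xfer_state *st, unsigned int msg_sg)
  {
          st->sg_cnt += msg_sg;
          return 0;
  }

  /* Stand-in for qup_i2c_bam_schedule_desc() followed by
   * qup_i2c_bam_clear_tag_buffers(). */
  static int flush_and_reset(struct xfer_state *st)
  {
          printf("scheduling BAM transfer: %u SG entries\n", st->sg_cnt);
          st->sg_cnt = 0;               /* next batch starts from zero */
          return 0;
  }

  int main(void)
  {
          struct xfer_state st = { 0, 16 };
          unsigned int msgs[] = { 10, 10, 4, 16 }; /* entries per msg */
          unsigned int num = sizeof(msgs) / sizeof(msgs[0]);
          unsigned int idx;

          for (idx = 0; idx < num; idx++) {
                  if (make_desc(&st, msgs[idx]))
                          return 1;
                  /*
                   * Flush once the accumulated count crosses the
                   * threshold, or on the last message. The real tag
                   * buffers hold two maximum-length messages, so
                   * overshooting the threshold by one message is safe.
                   */
                  if (st.sg_cnt > st.max_xfer_sg_len || idx == num - 1)
                          if (flush_and_reset(&st))
                                  return 1;
          }
          return 0;
  }

With the example sizes above, the first flush happens after the
second message (20 entries > 16) and the remaining two messages go
out in a second BAM transfer, which mirrors how the patch batches
messages without ever exceeding the doubled buffers.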

Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
Reviewed-by: Andy Gross <andy.gross@linaro.org>
Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
---
 drivers/i2c/busses/i2c-qup.c | 194 ++++++++++++++++++++---------------
 1 file changed, 110 insertions(+), 84 deletions(-)

--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -118,8 +118,12 @@
 #define ONE_BYTE 0x1
 #define QUP_I2C_MX_CONFIG_DURING_RUN BIT(31)
 
+/* Maximum transfer length for single DMA descriptor */
 #define MX_TX_RX_LEN SZ_64K
 #define MX_BLOCKS (MX_TX_RX_LEN / QUP_READ_LIMIT)
+/* Maximum transfer length for all DMA descriptors */
+#define MX_DMA_TX_RX_LEN (2 * MX_TX_RX_LEN)
+#define MX_DMA_BLOCKS (MX_DMA_TX_RX_LEN / QUP_READ_LIMIT)
 
 /*
  * Minimum transfer timeout for i2c transfers in seconds. It will be added on
@@ -150,6 +154,7 @@ struct qup_i2c_bam {
 	struct qup_i2c_tag tag;
 	struct dma_chan *dma;
 	struct scatterlist *sg;
+	unsigned int sg_cnt;
 };
 
 struct qup_i2c_dev {
@@ -188,6 +193,8 @@ struct qup_i2c_dev {
 	bool is_dma;
 	/* To check if the current transfer is using DMA */
 	bool use_dma;
+	unsigned int max_xfer_sg_len;
+	unsigned int tag_buf_pos;
 	struct dma_pool *dpool;
 	struct qup_i2c_tag start_tag;
 	struct qup_i2c_bam brx;
@@ -692,102 +699,87 @@ static int qup_i2c_req_dma(struct qup_i2
 	return 0;
 }
 
-static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
-			       int num)
+static int qup_i2c_bam_make_desc(struct qup_i2c_dev *qup, struct i2c_msg *msg)
 {
-	struct dma_async_tx_descriptor *txd, *rxd = NULL;
-	int ret = 0, idx = 0, limit = QUP_READ_LIMIT;
-	dma_cookie_t cookie_rx, cookie_tx;
-	u32 len, blocks, rem;
-	u32 i, tlen, tx_len, tx_cnt = 0, rx_cnt = 0, off = 0;
+	int ret = 0, limit = QUP_READ_LIMIT;
+	u32 len = 0, blocks, rem;
+	u32 i = 0, tlen, tx_len = 0;
 	u8 *tags;
 
-	while (idx < num) {
-		tx_len = 0, len = 0, i = 0;
-
-		qup->is_last = (idx == (num - 1));
+	qup_i2c_set_blk_data(qup, msg);
 
-		qup_i2c_set_blk_data(qup, msg);
+	blocks = qup->blk.count;
+	rem = msg->len - (blocks - 1) * limit;
 
-		blocks = qup->blk.count;
-		rem = msg->len - (blocks - 1) * limit;
+	if (msg->flags & I2C_M_RD) {
+		while (qup->blk.pos < blocks) {
+			tlen = (i == (blocks - 1)) ? rem : limit;
+			tags = &qup->start_tag.start[qup->tag_buf_pos + len];
+			len += qup_i2c_set_tags(tags, qup, msg);
+			qup->blk.data_len -= tlen;
+
+			/* scratch buf to read the start and len tags */
+			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
+					     &qup->brx.tag.start[0],
+					     2, qup, DMA_FROM_DEVICE);
 
-		if (msg->flags & I2C_M_RD) {
-			while (qup->blk.pos < blocks) {
-				tlen = (i == (blocks - 1)) ? rem : limit;
-				tags = &qup->start_tag.start[off + len];
-				len += qup_i2c_set_tags(tags, qup, msg);
-				qup->blk.data_len -= tlen;
-
-				/* scratch buf to read the start and len tags */
-				ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++],
-						     &qup->brx.tag.start[0],
-						     2, qup, DMA_FROM_DEVICE);
-
-				if (ret)
-					return ret;
-
-				ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++],
-						     &msg->buf[limit * i],
-						     tlen, qup,
-						     DMA_FROM_DEVICE);
-				if (ret)
-					return ret;
+			if (ret)
+				return ret;
 
-				i++;
-				qup->blk.pos = i;
-			}
-			ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-					     &qup->start_tag.start[off],
-					     len, qup, DMA_TO_DEVICE);
+			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
+					     &msg->buf[limit * i],
+					     tlen, qup,
+					     DMA_FROM_DEVICE);
 			if (ret)
 				return ret;
 
-			off += len;
-		} else {
-			while (qup->blk.pos < blocks) {
-				tlen = (i == (blocks - 1)) ? rem : limit;
-				tags = &qup->start_tag.start[off + tx_len];
-				len = qup_i2c_set_tags(tags, qup, msg);
-				qup->blk.data_len -= tlen;
-
-				ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-						     tags, len,
-						     qup, DMA_TO_DEVICE);
-				if (ret)
-					return ret;
-
-				tx_len += len;
-				ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-						     &msg->buf[limit * i],
-						     tlen, qup, DMA_TO_DEVICE);
-				if (ret)
-					return ret;
-				i++;
-				qup->blk.pos = i;
-			}
-			off += tx_len;
+			i++;
+			qup->blk.pos = i;
+		}
+		ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+				     &qup->start_tag.start[qup->tag_buf_pos],
+				     len, qup, DMA_TO_DEVICE);
+		if (ret)
+			return ret;
 
-			if (idx == (num - 1)) {
-				len = 1;
-				if (rx_cnt) {
-					qup->btx.tag.start[0] =
-						QUP_BAM_INPUT_EOT;
-					len++;
-				}
-				qup->btx.tag.start[len - 1] =
-					QUP_BAM_FLUSH_STOP;
-				ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-						     &qup->btx.tag.start[0],
-						     len, qup, DMA_TO_DEVICE);
-				if (ret)
-					return ret;
-			}
+		qup->tag_buf_pos += len;
+	} else {
+		while (qup->blk.pos < blocks) {
+			tlen = (i == (blocks - 1)) ? rem : limit;
+			tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len];
+			len = qup_i2c_set_tags(tags, qup, msg);
+			qup->blk.data_len -= tlen;
+
+			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+					     tags, len,
+					     qup, DMA_TO_DEVICE);
+			if (ret)
+				return ret;
+
+			tx_len += len;
+			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+					     &msg->buf[limit * i],
+					     tlen, qup, DMA_TO_DEVICE);
+			if (ret)
+				return ret;
+			i++;
+			qup->blk.pos = i;
 		}
-		idx++;
-		msg++;
+
+		qup->tag_buf_pos += tx_len;
 	}
 
+	return 0;
+}
+
+static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
+{
+	struct dma_async_tx_descriptor *txd, *rxd = NULL;
+	int ret = 0;
+	dma_cookie_t cookie_rx, cookie_tx;
+	u32 len = 0;
+	u32 tx_cnt = qup->btx.sg_cnt, rx_cnt = qup->brx.sg_cnt;
+
 	/* schedule the EOT and FLUSH I2C tags */
 	len = 1;
 	if (rx_cnt) {
@@ -886,11 +878,19 @@ desc_err:
 	return ret;
 }
 
+static void qup_i2c_bam_clear_tag_buffers(struct qup_i2c_dev *qup)
+{
+	qup->btx.sg_cnt = 0;
+	qup->brx.sg_cnt = 0;
+	qup->tag_buf_pos = 0;
+}
+
 static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
 			    int num)
 {
 	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
 	int ret = 0;
+	int idx = 0;
 
 	enable_irq(qup->irq);
 	ret = qup_i2c_req_dma(qup);
@@ -913,9 +913,34 @@ static int qup_i2c_bam_xfer(struct i2c_a
 		goto out;
 
 	writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+	qup_i2c_bam_clear_tag_buffers(qup);
+
+	for (idx = 0; idx < num; idx++) {
+		qup->msg = msg + idx;
+		qup->is_last = idx == (num - 1);
+
+		ret = qup_i2c_bam_make_desc(qup, qup->msg);
+		if (ret)
+			break;
+
+		/*
+		 * Schedule the BAM transfer if the descriptors built so far
+		 * have already crossed the maximum length. Since the tag
+		 * buffers are sized for two maximum-length transfers, this
+		 * overshoot can never exceed the actual buffer length.
+		 */
+		if (qup->btx.sg_cnt > qup->max_xfer_sg_len ||
+		    qup->brx.sg_cnt > qup->max_xfer_sg_len ||
+		    qup->is_last) {
+			ret = qup_i2c_bam_schedule_desc(qup);
+			if (ret)
+				break;
+
+			qup_i2c_bam_clear_tag_buffers(qup);
+		}
+	}
 
-	qup->msg = msg;
-	ret = qup_i2c_bam_do_xfer(qup, qup->msg, num);
 out:
 	disable_irq(qup->irq);
 
@@ -1468,7 +1493,8 @@ static int qup_i2c_probe(struct platform
 	else if (ret != 0)
 		goto nodma;
 
-	blocks = (MX_BLOCKS << 1) + 1;
+	qup->max_xfer_sg_len = (MX_BLOCKS << 1);
+	blocks = (MX_DMA_BLOCKS << 1) + 1;
 	qup->btx.sg = devm_kzalloc(&pdev->dev,
 				   sizeof(*qup->btx.sg) * blocks,
 				   GFP_KERNEL);
@@ -1611,7 +1637,7 @@ nodma:
 	one_bit_t = (USEC_PER_SEC / clk_freq) + 1;
 	qup->one_byte_t = one_bit_t * 9;
 	qup->xfer_timeout = TOUT_MIN * HZ +
-			    usecs_to_jiffies(MX_TX_RX_LEN * qup->one_byte_t);
+			    usecs_to_jiffies(MX_DMA_TX_RX_LEN * qup->one_byte_t);
 
 	dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
 		qup->in_blk_sz, qup->in_fifo_sz,