From 6f2f0f6465acbd59391c43352ff0df77df1f01db Mon Sep 17 00:00:00 2001
From: Abhishek Sahu <absahu@codeaurora.org>
Date: Mon, 12 Mar 2018 18:44:59 +0530
Subject: [PATCH 10/13] i2c: qup: fix buffer overflow for multiple msg of
 maximum xfer len
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
BAM mode requires a buffer for the start tag data and for the TX and
RX SG lists. Currently this buffer is sized for a single transfer of
maximum length (65K). But an I2C transfer can contain multiple
messages, and each message can itself be of this maximum length, so a
buffer overflow occurs in that case. Increasing the buffer length is
not feasible, since an I2C transfer can contain any number of
messages. This patch therefore makes the following changes so that
I2C transfers with multiple messages work (a sketch of the resulting
flow follows the list):
1. Calculate the required buffers for 2 maximum-length messages
   (65K * 2).

2. Split descriptor formation and descriptor scheduling. The idea is
   to fit as many messages as possible into one DMA transfer, up to
   the 65K threshold (max_xfer_sg_len). Whenever sg_cnt crosses this
   threshold, schedule the BAM transfer; the subsequent transfer then
   starts again from zero.
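
For illustration, the per-message flow after this patch has the shape
below. This is a minimal stand-alone sketch, not driver code:
make_desc(), schedule_desc(), clear_tag_buffers() and the trimmed
struct qup_dev are hypothetical stand-ins for qup_i2c_bam_make_desc(),
qup_i2c_bam_schedule_desc(), qup_i2c_bam_clear_tag_buffers() and
struct qup_i2c_dev in the diff below:

	/* Trimmed stand-ins for the driver structures (hypothetical). */
	struct bam_chan {
		unsigned int sg_cnt;		/* SG entries queued so far */
	};

	struct qup_dev {
		struct bam_chan btx, brx;	/* TX/RX BAM channels */
		unsigned int max_xfer_sg_len;	/* threshold: MX_BLOCKS << 1 */
		unsigned int tag_buf_pos;	/* next free byte of tag buffer */
	};

	/* Stand-ins for qup_i2c_bam_make_desc()/qup_i2c_bam_schedule_desc(). */
	extern int make_desc(struct qup_dev *qup, int msg_idx);
	extern int schedule_desc(struct qup_dev *qup);

	static void clear_tag_buffers(struct qup_dev *qup)
	{
		qup->btx.sg_cnt = 0;
		qup->brx.sg_cnt = 0;
		qup->tag_buf_pos = 0;
	}

	static int bam_xfer(struct qup_dev *qup, int num)
	{
		int idx, ret = 0;

		clear_tag_buffers(qup);
		for (idx = 0; idx < num; idx++) {
			/* Step 1: only form descriptors; nothing is submitted. */
			ret = make_desc(qup, idx);
			if (ret)
				break;

			/*
			 * Step 2: submit the queued transfer once either SG
			 * list crosses the threshold, or on the last message.
			 * The next message then fills the buffers from zero.
			 */
			if (qup->btx.sg_cnt > qup->max_xfer_sg_len ||
			    qup->brx.sg_cnt > qup->max_xfer_sg_len ||
			    idx == (num - 1)) {
				ret = schedule_desc(qup);
				if (ret)
					break;
				clear_tag_buffers(qup);
			}
		}

		return ret;
	}

Because scheduling is decoupled from descriptor formation, the
buffers only ever have to hold what was queued at the threshold plus
one more maximum-length message, which is why sizing for 2
maximum-length messages suffices.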
Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
Reviewed-by: Andy Gross <andy.gross@linaro.org>
Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
---
 drivers/i2c/busses/i2c-qup.c | 194 ++++++++++++++++++++---------------
 1 file changed, 110 insertions(+), 84 deletions(-)
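
A quick sizing check (editor's note, assuming QUP_READ_LIMIT is 256 as
elsewhere in this driver): each 256-byte block consumes two SG entries
(one for tags, one for data), so one maximum-length message needs
MX_BLOCKS << 1 = 512 entries, which is the max_xfer_sg_len threshold
below. Sizing for two maximum-length messages gives MX_DMA_BLOCKS =
(2 * SZ_64K) / 256 = 512 blocks, hence (MX_DMA_BLOCKS << 1) + 1 = 1025
SG entries including the final EOT/flush tag entry.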
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
 #define QUP_I2C_MX_CONFIG_DURING_RUN	BIT(31)

+/* Maximum transfer length for single DMA descriptor */
 #define MX_TX_RX_LEN			SZ_64K
 #define MX_BLOCKS			(MX_TX_RX_LEN / QUP_READ_LIMIT)
+/* Maximum transfer length for all DMA descriptors */
+#define MX_DMA_TX_RX_LEN		(2 * MX_TX_RX_LEN)
+#define MX_DMA_BLOCKS			(MX_DMA_TX_RX_LEN / QUP_READ_LIMIT)

 /*
  * Minimum transfer timeout for i2c transfers in seconds. It will be added on
@@ -150,6 +154,7 @@ struct qup_i2c_bam {
 	struct qup_i2c_tag tag;
 	struct scatterlist *sg;
+	unsigned int sg_cnt;
@@ -188,6 +193,8 @@ struct qup_i2c_dev {
 	/* To check if the current transfer is using DMA */
 	bool is_dma;
+	unsigned int max_xfer_sg_len;
+	unsigned int tag_buf_pos;
 	struct dma_pool *dpool;
 	struct qup_i2c_tag start_tag;
 	struct qup_i2c_bam brx;
@@ -692,102 +699,87 @@ static int qup_i2c_req_dma(struct qup_i2
-static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
-			       int num)
+static int qup_i2c_bam_make_desc(struct qup_i2c_dev *qup, struct i2c_msg *msg)
 {
-	struct dma_async_tx_descriptor *txd, *rxd = NULL;
-	int ret = 0, idx = 0, limit = QUP_READ_LIMIT;
-	dma_cookie_t cookie_rx, cookie_tx;
-	u32 len, blocks, rem;
-	u32 i, tlen, tx_len, tx_cnt = 0, rx_cnt = 0, off = 0;
+	int ret = 0, limit = QUP_READ_LIMIT;
+	u32 len = 0, blocks, rem;
+	u32 i = 0, tlen, tx_len = 0;
 	u8 *tags;

-	while (idx < num) {
-		tx_len = 0, len = 0, i = 0;
-		qup->is_last = (idx == (num - 1));
+	qup_i2c_set_blk_data(qup, msg);
-		qup_i2c_set_blk_data(qup, msg);
+	blocks = qup->blk.count;
+	rem = msg->len - (blocks - 1) * limit;
-		blocks = qup->blk.count;
-		rem = msg->len - (blocks - 1) * limit;
+	if (msg->flags & I2C_M_RD) {
+		while (qup->blk.pos < blocks) {
+			tlen = (i == (blocks - 1)) ? rem : limit;
+			tags = &qup->start_tag.start[qup->tag_buf_pos + len];
+			len += qup_i2c_set_tags(tags, qup, msg);
+			qup->blk.data_len -= tlen;
+
+			/* scratch buf to read the start and len tags */
+			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
+					     &qup->brx.tag.start[0],
+					     2, qup, DMA_FROM_DEVICE);
-		if (msg->flags & I2C_M_RD) {
-			while (qup->blk.pos < blocks) {
-				tlen = (i == (blocks - 1)) ? rem : limit;
-				tags = &qup->start_tag.start[off + len];
-				len += qup_i2c_set_tags(tags, qup, msg);
-				qup->blk.data_len -= tlen;
-
-				/* scratch buf to read the start and len tags */
-				ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++],
-						     &qup->brx.tag.start[0],
-						     2, qup, DMA_FROM_DEVICE);
-				ret = qup_sg_set_buf(&qup->brx.sg[rx_cnt++],
-						     &msg->buf[limit * i],
-			ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-					     &qup->start_tag.start[off],
-					     len, qup, DMA_TO_DEVICE);
+			ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
+					     &msg->buf[limit * i],
-			while (qup->blk.pos < blocks) {
-				tlen = (i == (blocks - 1)) ? rem : limit;
-				tags = &qup->start_tag.start[off + tx_len];
-				len = qup_i2c_set_tags(tags, qup, msg);
-				qup->blk.data_len -= tlen;
-
-				ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-						     qup, DMA_TO_DEVICE);
-				ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-						     &msg->buf[limit * i],
-						     tlen, qup, DMA_TO_DEVICE);
+		ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+				     &qup->start_tag.start[qup->tag_buf_pos],
+				     len, qup, DMA_TO_DEVICE);
-		if (idx == (num - 1)) {
-				qup->btx.tag.start[0] =
-				qup->btx.tag.start[len - 1] =
-					QUP_BAM_FLUSH_STOP;
-				ret = qup_sg_set_buf(&qup->btx.sg[tx_cnt++],
-						     &qup->btx.tag.start[0],
-						     len, qup, DMA_TO_DEVICE);
+		qup->tag_buf_pos += len;
+	} else {
+		while (qup->blk.pos < blocks) {
+			tlen = (i == (blocks - 1)) ? rem : limit;
+			tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len];
+			len = qup_i2c_set_tags(tags, qup, msg);
+			qup->blk.data_len -= tlen;
+
+			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+					     qup, DMA_TO_DEVICE);
+			ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+					     &msg->buf[limit * i],
+					     tlen, qup, DMA_TO_DEVICE);
+		qup->tag_buf_pos += tx_len;
+static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
+{
+	struct dma_async_tx_descriptor *txd, *rxd = NULL;
+	dma_cookie_t cookie_rx, cookie_tx;
+	u32 tx_cnt = qup->btx.sg_cnt, rx_cnt = qup->brx.sg_cnt;

 	/* schedule the EOT and FLUSH I2C tags */
@@ -886,11 +878,19 @@ desc_err:
+static void qup_i2c_bam_clear_tag_buffers(struct qup_i2c_dev *qup)
+{
+	qup->btx.sg_cnt = 0;
+	qup->brx.sg_cnt = 0;
+	qup->tag_buf_pos = 0;
+}
+
 static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
 			    int num)
 {
 	struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
 	enable_irq(qup->irq);
 	ret = qup_i2c_req_dma(qup);
@@ -913,9 +913,34 @@ static int qup_i2c_bam_xfer(struct i2c_a
 	writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+	qup_i2c_bam_clear_tag_buffers(qup);
+	for (idx = 0; idx < num; idx++) {
+		qup->msg = msg + idx;
+		qup->is_last = idx == (num - 1);
+
+		ret = qup_i2c_bam_make_desc(qup, qup->msg);
+		if (ret)
+			break;
+
+		/*
+		 * Make the DMA descriptors and schedule the BAM transfer
+		 * once the accumulated length crosses the maximum. Since
+		 * memory for the tag buffers is allocated for 2
+		 * maximum-length transfers, the buffers themselves can
+		 * never overflow.
+		 */
+		if (qup->btx.sg_cnt > qup->max_xfer_sg_len ||
+		    qup->brx.sg_cnt > qup->max_xfer_sg_len ||
+		    qup->is_last) {
+			ret = qup_i2c_bam_schedule_desc(qup);
+			if (ret)
+				break;
+
+			qup_i2c_bam_clear_tag_buffers(qup);
+		}
+	}
-	ret = qup_i2c_bam_do_xfer(qup, qup->msg, num);

 	disable_irq(qup->irq);
@@ -1468,7 +1493,8 @@ static int qup_i2c_probe(struct platform
-	blocks = (MX_BLOCKS << 1) + 1;
+	qup->max_xfer_sg_len = (MX_BLOCKS << 1);
+	blocks = (MX_DMA_BLOCKS << 1) + 1;
 	qup->btx.sg = devm_kzalloc(&pdev->dev,
 				   sizeof(*qup->btx.sg) * blocks,
@@ -1611,7 +1637,7 @@ nodma:
 	one_bit_t = (USEC_PER_SEC / clk_freq) + 1;
 	qup->one_byte_t = one_bit_t * 9;
 	qup->xfer_timeout = TOUT_MIN * HZ +
-		usecs_to_jiffies(MX_TX_RX_LEN * qup->one_byte_t);
+		usecs_to_jiffies(MX_DMA_TX_RX_LEN * qup->one_byte_t);

 	dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
 		qup->in_blk_sz, qup->in_fifo_sz,