1 From 0d3b557ef5494adf6458fe4e6f4a9b41e6e0d12a Mon Sep 17 00:00:00 2001
2 From: Dave Stevenson <dave.stevenson@raspberrypi.org>
3 Date: Thu, 10 May 2018 12:42:11 -0700
4 Subject: [PATCH 371/454] staging: bcm2835-camera: Remove V4L2/MMAL buffer remapping
7 commit 9384167070713570a25f854d641979e94163c425 upstream
9 The MMAL and V4L2 buffers had been disassociated, and linked on
10 demand. Seeing as both are finite and low in number, and we now have
11 the same number of each, link them for the duration. This removes the
12 complexity of maintaining lists as the struct mmal_buffer context
13 comes back from the VPU, so we can directly link back to the relevant
14 buffer.
16 Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
17 Signed-off-by: Eric Anholt <eric@anholt.net>
18 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
20 .../bcm2835-camera/bcm2835-camera.c | 7 +-
21 .../vc04_services/bcm2835-camera/mmal-vchiq.c | 109 ++++--------------
22 2 files changed, 29 insertions(+), 87 deletions(-)
24 --- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
25 +++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
26 @@ -301,8 +301,8 @@ static int buffer_prepare(struct vb2_buf
27 struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
30 - v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
32 + v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
35 BUG_ON(!dev->capture.port);
36 BUG_ON(!dev->capture.fmt);
37 @@ -522,7 +522,8 @@ static void buffer_queue(struct vb2_buff
40 v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
41 - "%s: dev:%p buf:%p\n", __func__, dev, buf);
42 + "%s: dev:%p buf:%p, idx %u\n",
43 + __func__, dev, buf, vb2->vb2_buf.index);
45 ret = vchiq_mmal_submit_buffer(dev->instance, dev->capture.port, buf);
47 --- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
48 +++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
49 @@ -329,16 +329,12 @@ static int bulk_receive(struct vchiq_mma
50 struct mmal_msg_context *msg_context)
53 - unsigned long flags = 0;
56 rd_len = msg->u.buffer_from_host.buffer_header.length;
58 - /* take buffer from queue */
59 - spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
60 - if (list_empty(&msg_context->u.bulk.port->buffers)) {
61 - spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
62 - pr_err("buffer list empty trying to submit bulk receive\n");
63 + if (!msg_context->u.bulk.buffer) {
64 + pr_err("bulk.buffer not configured - error in buffer_from_host\n");
66 /* todo: this is a serious error, we should never have
67 * committed a buffer_to_host operation to the mmal
68 @@ -353,13 +349,6 @@ static int bulk_receive(struct vchiq_mma
72 - msg_context->u.bulk.buffer =
73 - list_entry(msg_context->u.bulk.port->buffers.next,
74 - struct mmal_buffer, list);
75 - list_del(&msg_context->u.bulk.buffer->list);
77 - spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
79 /* ensure we do not overrun the available buffer */
80 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
81 rd_len = msg_context->u.bulk.buffer->buffer_size;
82 @@ -422,31 +411,6 @@ static int inline_receive(struct vchiq_m
84 struct mmal_msg_context *msg_context)
86 - unsigned long flags = 0;
88 - /* take buffer from queue */
89 - spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
90 - if (list_empty(&msg_context->u.bulk.port->buffers)) {
91 - spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
92 - pr_err("buffer list empty trying to receive inline\n");
94 - /* todo: this is a serious error, we should never have
95 - * committed a buffer_to_host operation to the mmal
96 - * port without the buffer to back it up (with
97 - * underflow handling) and there is no obvious way to
98 - * deal with this. Less bad than the bulk case as we
99 - * can just drop this on the floor but...unhelpful
104 - msg_context->u.bulk.buffer =
105 - list_entry(msg_context->u.bulk.port->buffers.next,
106 - struct mmal_buffer, list);
107 - list_del(&msg_context->u.bulk.buffer->list);
109 - spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
111 memcpy(msg_context->u.bulk.buffer->buffer,
112 msg->u.buffer_from_host.short_data,
113 msg->u.buffer_from_host.payload_in_message);
114 @@ -466,6 +430,9 @@ buffer_from_host(struct vchiq_mmal_insta
118 + if (!port->enabled)
121 pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
124 @@ -479,7 +446,7 @@ buffer_from_host(struct vchiq_mmal_insta
125 /* store bulk message context for when data arrives */
126 msg_context->u.bulk.instance = instance;
127 msg_context->u.bulk.port = port;
128 - msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
129 + msg_context->u.bulk.buffer = buf;
130 msg_context->u.bulk.buffer_used = 0;
132 /* initialise work structure ready to schedule callback */
133 @@ -529,43 +496,6 @@ buffer_from_host(struct vchiq_mmal_insta
137 -/* submit a buffer to the mmal sevice
139 - * the buffer_from_host uses size data from the ports next available
140 - * mmal_buffer and deals with there being no buffer available by
141 - * incrementing the underflow for later
143 -static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
144 - struct vchiq_mmal_port *port)
147 - struct mmal_buffer *buf;
148 - unsigned long flags = 0;
150 - if (!port->enabled)
153 - /* peek buffer from queue */
154 - spin_lock_irqsave(&port->slock, flags);
155 - if (list_empty(&port->buffers)) {
156 - spin_unlock_irqrestore(&port->slock, flags);
160 - buf = list_entry(port->buffers.next, struct mmal_buffer, list);
162 - spin_unlock_irqrestore(&port->slock, flags);
164 - /* issue buffer to mmal service */
165 - ret = buffer_from_host(instance, port, buf);
167 - pr_err("adding buffer header failed\n");
168 - /* todo: how should this be dealt with */
174 /* deals with receipt of buffer to host message */
175 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
176 struct mmal_msg *msg, u32 msg_len)
177 @@ -1425,7 +1355,14 @@ static int port_disable(struct vchiq_mma
178 ret = port_action_port(instance, port,
179 MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
181 - /* drain all queued buffers on port */
183 + * Drain all queued buffers on port. This should only
184 + * apply to buffers that have been queued before the port
185 + * has been enabled. If the port has been enabled and buffers
186 + * passed, then the buffers should have been removed from this
187 + * list, and we should get the relevant callbacks via VCHIQ
188 + * to release the buffers.
190 spin_lock_irqsave(&port->slock, flags);
192 list_for_each_safe(buf_head, q, &port->buffers) {
193 @@ -1454,7 +1391,7 @@ static int port_enable(struct vchiq_mmal
194 struct vchiq_mmal_port *port)
196 unsigned int hdr_count;
197 - struct list_head *buf_head;
198 + struct list_head *q, *buf_head;
202 @@ -1480,7 +1417,7 @@ static int port_enable(struct vchiq_mmal
203 if (port->buffer_cb) {
204 /* send buffer headers to videocore */
206 - list_for_each(buf_head, &port->buffers) {
207 + list_for_each_safe(buf_head, q, &port->buffers) {
208 struct mmal_buffer *mmalbuf;
210 mmalbuf = list_entry(buf_head, struct mmal_buffer,
211 @@ -1489,6 +1426,7 @@ static int port_enable(struct vchiq_mmal
215 + list_del(buf_head);
217 if (hdr_count > port->current_buffer.num)
219 @@ -1701,12 +1639,15 @@ int vchiq_mmal_submit_buffer(struct vchi
220 struct mmal_buffer *buffer)
222 unsigned long flags = 0;
225 - spin_lock_irqsave(&port->slock, flags);
226 - list_add_tail(&buffer->list, &port->buffers);
227 - spin_unlock_irqrestore(&port->slock, flags);
229 - port_buffer_from_host(instance, port);
230 + ret = buffer_from_host(instance, port, buffer);
231 + if (ret == -EINVAL) {
232 + /* Port is disabled. Queue for when it is enabled. */
233 + spin_lock_irqsave(&port->slock, flags);
234 + list_add_tail(&buffer->list, &port->buffers);
235 + spin_unlock_irqrestore(&port->slock, flags);