From 92f7f613ece28ecff26cbe5b5af20343bb624db1 Mon Sep 17 00:00:00 2001
From: Maxime Ripard <maxime@cerno.tech>
Date: Mon, 2 May 2022 10:20:56 +0200
Subject: [PATCH] drm/vc4: crtc: Use an union to store the page flip
 callback

We'll need to extend the vc4_async_flip_state structure to rely on
another callback implementation, so let's move the current one into a
union.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
---
 drivers/gpu/drm/vc4/vc4_crtc.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)
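
A rough sketch of vc4_async_flip_state after this change, for reference
only (the surrounding fields are quoted from the existing structure and
are not touched here):

    struct vc4_async_flip_state {
            struct drm_crtc *crtc;
            ...
            struct drm_pending_vblank_event *event;

            /* Only one of the two completion paths is armed for a given
             * flip, so both callbacks can share their storage.
             */
            union {
                    struct dma_fence_cb fence;      /* dma_fence-based completion */
                    struct vc4_seqno_cb seqno;      /* V3D seqno-based completion */
            } cb;
    };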
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -802,18 +802,18 @@ struct vc4_async_flip_state {
 	struct drm_framebuffer *old_fb;
 	struct drm_pending_vblank_event *event;
 
-	struct vc4_seqno_cb cb;
-	struct dma_fence_cb fence_cb;
+	union {
+		struct dma_fence_cb fence;
+		struct vc4_seqno_cb seqno;
+	} cb;
 };
 
 /* Called when the V3D execution for the BO being flipped to is done, so that
  * we can actually update the plane's address to point to it.
  */
 static void
-vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
 {
-	struct vc4_async_flip_state *flip_state =
-		container_of(cb, struct vc4_async_flip_state, cb);
 	struct drm_crtc *crtc = flip_state->crtc;
 	struct drm_device *dev = crtc->dev;
 	struct drm_plane *plane = crtc->primary;
@@ -849,13 +849,21 @@ vc4_async_page_flip_complete(struct vc4_
 	kfree(flip_state);
 }
 
+static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+{
+	struct vc4_async_flip_state *flip_state =
+		container_of(cb, struct vc4_async_flip_state, cb.seqno);
+
+	vc4_async_page_flip_complete(flip_state);
+}
+
 static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
 					       struct dma_fence_cb *cb)
 {
 	struct vc4_async_flip_state *flip_state =
-		container_of(cb, struct vc4_async_flip_state, fence_cb);
+		container_of(cb, struct vc4_async_flip_state, cb.fence);
 
-	vc4_async_page_flip_complete(&flip_state->cb);
+	vc4_async_page_flip_complete(flip_state);
 	dma_fence_put(fence);
 }
@@ -870,14 +878,14 @@ static int vc4_async_set_fence_cb(struct
 		struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
 
-		return vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
-					  vc4_async_page_flip_complete);
+		return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
+					  vc4_async_page_flip_seqno_complete);
 	}
 
 	fence = dma_fence_get(dma_resv_excl_fence(cma_bo->base.resv));
-	if (dma_fence_add_callback(fence, &flip_state->fence_cb,
+	if (dma_fence_add_callback(fence, &flip_state->cb.fence,
 				   vc4_async_page_flip_fence_complete))
-		vc4_async_page_flip_fence_complete(fence, &flip_state->fence_cb);
+		vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
 
 	return 0;
 }
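
A note on the two completion paths: both handlers funnel into
vc4_async_page_flip_complete() and recover the flip state with
container_of() on their respective union member. dma_fence_add_callback()
returns an error when the fence has already signalled (the callback will
then never run), which is why vc4_async_page_flip_fence_complete() is
invoked directly in that case. For readers unfamiliar with the
container_of()-on-a-union-member pattern, here is a minimal,
self-contained userspace sketch (hypothetical types, not driver code):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified container_of(), same semantics as the kernel macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fence_cb { int dummy; };
    struct seqno_cb { int dummy; };

    struct flip_state {
            int id;
            union {
                    struct fence_cb fence;
                    struct seqno_cb seqno;
            } cb;
    };

    /* A completion callback only receives its embedded member... */
    static void seqno_done(struct seqno_cb *cb)
    {
            /* ...and recovers the enclosing state through the union
             * member, like container_of(cb, struct vc4_async_flip_state,
             * cb.seqno) above.
             */
            struct flip_state *state =
                    container_of(cb, struct flip_state, cb.seqno);

            printf("flip %d completed\n", state->id);
    }

    int main(void)
    {
            struct flip_state state = { .id = 42 };

            seqno_done(&state.cb.seqno);
            return 0;
    }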