d3b680a56c5329648c4ed260151817fcf80d1cb5
[openwrt/staging/ansuel.git] /
1 From 99c7b8eabae7a6a6e6c5f53f3a9d0996b24e10b3 Mon Sep 17 00:00:00 2001
2 From: Maxime Ripard <maxime@cerno.tech>
3 Date: Mon, 2 May 2022 15:27:36 +0200
4 Subject: [PATCH] drm/vc4: crtc: Move the BO Handling out of Common
5 Page-Flip Handler
6
7 The function vc4_async_page_flip() handles asynchronous page-flips in
8 the vc4 driver.
9
10 However, it mixes some generic code with code that should only be run on
11 older generations that have the GPU handled by the vc4 driver.
12
13 Let's split the generic part out of vc4_async_page_flip() and into a
14 common function that will be reusable by a handler made for the BCM2711.
15
16 Signed-off-by: Maxime Ripard <maxime@cerno.tech>
17 ---
18 drivers/gpu/drm/vc4/vc4_crtc.c | 75 ++++++++++++++++++++++------------
19 1 file changed, 48 insertions(+), 27 deletions(-)
20
21 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
22 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
23 @@ -912,40 +912,19 @@ static int vc4_async_set_fence_cb(struct
24 return 0;
25 }
26
27 -/* Implements async (non-vblank-synced) page flips.
28 - *
29 - * The page flip ioctl needs to return immediately, so we grab the
30 - * modeset semaphore on the pipe, and queue the address update for
31 - * when V3D is done with the BO being flipped to.
32 - */
33 -static int vc4_async_page_flip(struct drm_crtc *crtc,
34 - struct drm_framebuffer *fb,
35 - struct drm_pending_vblank_event *event,
36 - uint32_t flags)
37 +static int
38 +vc4_async_page_flip_common(struct drm_crtc *crtc,
39 + struct drm_framebuffer *fb,
40 + struct drm_pending_vblank_event *event,
41 + uint32_t flags)
42 {
43 struct drm_device *dev = crtc->dev;
44 struct drm_plane *plane = crtc->primary;
45 - int ret = 0;
46 struct vc4_async_flip_state *flip_state;
47 - struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
48 - struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
49 -
50 - /* Increment the BO usecnt here, so that we never end up with an
51 - * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
52 - * plane is later updated through the non-async path.
53 - * FIXME: we should move to generic async-page-flip when it's
54 - * available, so that we can get rid of this hand-made prepare_fb()
55 - * logic.
56 - */
57 - ret = vc4_bo_inc_usecnt(bo);
58 - if (ret)
59 - return ret;
60
61 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
62 - if (!flip_state) {
63 - vc4_bo_dec_usecnt(bo);
64 + if (!flip_state)
65 return -ENOMEM;
66 - }
67
68 drm_framebuffer_get(fb);
69 flip_state->fb = fb;
70 @@ -978,6 +957,48 @@ static int vc4_async_page_flip(struct dr
71 return 0;
72 }
73
74 +/* Implements async (non-vblank-synced) page flips.
75 + *
76 + * The page flip ioctl needs to return immediately, so we grab the
77 + * modeset semaphore on the pipe, and queue the address update for
78 + * when V3D is done with the BO being flipped to.
79 + */
80 +static int vc4_async_page_flip(struct drm_crtc *crtc,
81 + struct drm_framebuffer *fb,
82 + struct drm_pending_vblank_event *event,
83 + uint32_t flags)
84 +{
85 + struct drm_device *dev = crtc->dev;
86 + struct vc4_dev *vc4 = to_vc4_dev(dev);
87 + struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
88 + struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
89 + int ret;
90 +
91 + if (WARN_ON_ONCE(vc4->is_vc5))
92 + return -ENODEV;
93 +
94 + /*
95 + * Increment the BO usecnt here, so that we never end up with an
96 + * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
97 + * plane is later updated through the non-async path.
98 + *
99 + * FIXME: we should move to generic async-page-flip when
100 + * it's available, so that we can get rid of this
101 + * hand-made prepare_fb() logic.
102 + */
103 + ret = vc4_bo_inc_usecnt(bo);
104 + if (ret)
105 + return ret;
106 +
107 + ret = vc4_async_page_flip_common(crtc, fb, event, flags);
108 + if (ret) {
109 + vc4_bo_dec_usecnt(bo);
110 + return ret;
111 + }
112 +
113 + return 0;
114 +}
115 +
116 int vc4_page_flip(struct drm_crtc *crtc,
117 struct drm_framebuffer *fb,
118 struct drm_pending_vblank_event *event,