699db05e0c99172da6cd2973ba4c0017ce8b67c1
[openwrt/staging/ldir.git] /
1 From c382ea6b0457027b6ad883ee4348e03df515a785 Mon Sep 17 00:00:00 2001
2 From: Maxime Ripard <maxime@cerno.tech>
3 Date: Fri, 17 Feb 2023 13:29:27 +0100
4 Subject: [PATCH] drm/vc4: Make v3d paths unavailable on any generation newer
5 than vc4
6
7 The V3D IP has been separate since BCM2711, so let's make sure we issue
8 a WARN if we're running not only on BCM2711, but also on anything newer.
9
10 Signed-off-by: Maxime Ripard <maxime@cerno.tech>
11 ---
12 drivers/gpu/drm/vc4/vc4_bo.c | 28 +++++++++++-----------
13 drivers/gpu/drm/vc4/vc4_crtc.c | 4 ++--
14 drivers/gpu/drm/vc4/vc4_drv.c | 8 +++----
15 drivers/gpu/drm/vc4/vc4_gem.c | 24 +++++++++----------
16 drivers/gpu/drm/vc4/vc4_irq.c | 10 ++++----
17 drivers/gpu/drm/vc4/vc4_kms.c | 2 +-
18 drivers/gpu/drm/vc4/vc4_perfmon.c | 20 ++++++++--------
19 drivers/gpu/drm/vc4/vc4_render_cl.c | 2 +-
20 drivers/gpu/drm/vc4/vc4_v3d.c | 10 ++++----
21 drivers/gpu/drm/vc4/vc4_validate.c | 8 +++----
22 drivers/gpu/drm/vc4/vc4_validate_shaders.c | 2 +-
23 11 files changed, 59 insertions(+), 59 deletions(-)
24
25 --- a/drivers/gpu/drm/vc4/vc4_bo.c
26 +++ b/drivers/gpu/drm/vc4/vc4_bo.c
27 @@ -251,7 +251,7 @@ void vc4_bo_add_to_purgeable_pool(struct
28 {
29 struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
30
31 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
32 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
33 return;
34
35 mutex_lock(&vc4->purgeable.lock);
36 @@ -265,7 +265,7 @@ static void vc4_bo_remove_from_purgeable
37 {
38 struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
39
40 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
41 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
42 return;
43
44 /* list_del_init() is used here because the caller might release
45 @@ -396,7 +396,7 @@ struct drm_gem_object *vc4_create_object
46 struct vc4_dev *vc4 = to_vc4_dev(dev);
47 struct vc4_bo *bo;
48
49 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
50 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
51 return ERR_PTR(-ENODEV);
52
53 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
54 @@ -427,7 +427,7 @@ struct vc4_bo *vc4_bo_create(struct drm_
55 struct drm_gem_dma_object *dma_obj;
56 struct vc4_bo *bo;
57
58 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
59 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
60 return ERR_PTR(-ENODEV);
61
62 if (size == 0)
63 @@ -496,7 +496,7 @@ int vc4_bo_dumb_create(struct drm_file *
64 struct vc4_bo *bo = NULL;
65 int ret;
66
67 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
68 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
69 return -ENODEV;
70
71 ret = vc4_dumb_fixup_args(args);
72 @@ -622,7 +622,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
73 struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
74 int ret;
75
76 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
77 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
78 return -ENODEV;
79
80 /* Fast path: if the BO is already retained by someone, no need to
81 @@ -661,7 +661,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo
82 {
83 struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
84
85 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
86 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
87 return;
88
89 /* Fast path: if the BO is still retained by someone, no need to test
90 @@ -783,7 +783,7 @@ int vc4_create_bo_ioctl(struct drm_devic
91 struct vc4_bo *bo = NULL;
92 int ret;
93
94 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
95 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
96 return -ENODEV;
97
98 ret = vc4_grab_bin_bo(vc4, vc4file);
99 @@ -813,7 +813,7 @@ int vc4_mmap_bo_ioctl(struct drm_device
100 struct drm_vc4_mmap_bo *args = data;
101 struct drm_gem_object *gem_obj;
102
103 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
104 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
105 return -ENODEV;
106
107 gem_obj = drm_gem_object_lookup(file_priv, args->handle);
108 @@ -839,7 +839,7 @@ vc4_create_shader_bo_ioctl(struct drm_de
109 struct vc4_bo *bo = NULL;
110 int ret;
111
112 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
113 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
114 return -ENODEV;
115
116 if (args->size == 0)
117 @@ -918,7 +918,7 @@ int vc4_set_tiling_ioctl(struct drm_devi
118 struct vc4_bo *bo;
119 bool t_format;
120
121 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
122 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
123 return -ENODEV;
124
125 if (args->flags != 0)
126 @@ -964,7 +964,7 @@ int vc4_get_tiling_ioctl(struct drm_devi
127 struct drm_gem_object *gem_obj;
128 struct vc4_bo *bo;
129
130 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
131 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
132 return -ENODEV;
133
134 if (args->flags != 0 || args->modifier != 0)
135 @@ -1011,7 +1011,7 @@ int vc4_bo_cache_init(struct drm_device
136 int ret;
137 int i;
138
139 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
140 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
141 return -ENODEV;
142
143 /* Create the initial set of BO labels that the kernel will
144 @@ -1075,7 +1075,7 @@ int vc4_label_bo_ioctl(struct drm_device
145 struct drm_gem_object *gem_obj;
146 int ret = 0, label;
147
148 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
149 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
150 return -ENODEV;
151
152 if (!args->len)
153 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
154 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
155 @@ -1023,7 +1023,7 @@ static int vc4_async_page_flip(struct dr
156 struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
157 int ret;
158
159 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
160 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
161 return -ENODEV;
162
163 /*
164 @@ -1066,7 +1066,7 @@ int vc4_page_flip(struct drm_crtc *crtc,
165 struct drm_device *dev = crtc->dev;
166 struct vc4_dev *vc4 = to_vc4_dev(dev);
167
168 - if (vc4->gen == VC4_GEN_5)
169 + if (vc4->gen > VC4_GEN_4)
170 return vc5_async_page_flip(crtc, fb, event, flags);
171 else
172 return vc4_async_page_flip(crtc, fb, event, flags);
173 --- a/drivers/gpu/drm/vc4/vc4_drv.c
174 +++ b/drivers/gpu/drm/vc4/vc4_drv.c
175 @@ -98,7 +98,7 @@ static int vc4_get_param_ioctl(struct dr
176 if (args->pad != 0)
177 return -EINVAL;
178
179 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
180 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
181 return -ENODEV;
182
183 if (!vc4->v3d)
184 @@ -147,7 +147,7 @@ static int vc4_open(struct drm_device *d
185 struct vc4_dev *vc4 = to_vc4_dev(dev);
186 struct vc4_file *vc4file;
187
188 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
189 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
190 return -ENODEV;
191
192 vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
193 @@ -165,7 +165,7 @@ static void vc4_close(struct drm_device
194 struct vc4_dev *vc4 = to_vc4_dev(dev);
195 struct vc4_file *vc4file = file->driver_priv;
196
197 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
198 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
199 return;
200
201 if (vc4file->bin_bo_used)
202 @@ -315,7 +315,7 @@ static int vc4_drm_bind(struct device *d
203 else
204 gen = VC4_GEN_4;
205
206 - if (gen == VC4_GEN_5)
207 + if (gen > VC4_GEN_4)
208 driver = &vc5_drm_driver;
209 else
210 driver = &vc4_drm_driver;
211 --- a/drivers/gpu/drm/vc4/vc4_gem.c
212 +++ b/drivers/gpu/drm/vc4/vc4_gem.c
213 @@ -76,7 +76,7 @@ vc4_get_hang_state_ioctl(struct drm_devi
214 u32 i;
215 int ret = 0;
216
217 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
218 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
219 return -ENODEV;
220
221 if (!vc4->v3d) {
222 @@ -389,7 +389,7 @@ vc4_wait_for_seqno(struct drm_device *de
223 unsigned long timeout_expire;
224 DEFINE_WAIT(wait);
225
226 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
227 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
228 return -ENODEV;
229
230 if (vc4->finished_seqno >= seqno)
231 @@ -474,7 +474,7 @@ vc4_submit_next_bin_job(struct drm_devic
232 struct vc4_dev *vc4 = to_vc4_dev(dev);
233 struct vc4_exec_info *exec;
234
235 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
236 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
237 return;
238
239 again:
240 @@ -522,7 +522,7 @@ vc4_submit_next_render_job(struct drm_de
241 if (!exec)
242 return;
243
244 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
245 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
246 return;
247
248 /* A previous RCL may have written to one of our textures, and
249 @@ -543,7 +543,7 @@ vc4_move_job_to_render(struct drm_device
250 struct vc4_dev *vc4 = to_vc4_dev(dev);
251 bool was_empty = list_empty(&vc4->render_job_list);
252
253 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
254 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
255 return;
256
257 list_move_tail(&exec->head, &vc4->render_job_list);
258 @@ -1012,7 +1012,7 @@ vc4_job_handle_completed(struct vc4_dev
259 unsigned long irqflags;
260 struct vc4_seqno_cb *cb, *cb_temp;
261
262 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
263 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
264 return;
265
266 spin_lock_irqsave(&vc4->job_lock, irqflags);
267 @@ -1051,7 +1051,7 @@ int vc4_queue_seqno_cb(struct drm_device
268 struct vc4_dev *vc4 = to_vc4_dev(dev);
269 unsigned long irqflags;
270
271 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
272 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
273 return -ENODEV;
274
275 cb->func = func;
276 @@ -1107,7 +1107,7 @@ vc4_wait_seqno_ioctl(struct drm_device *
277 struct vc4_dev *vc4 = to_vc4_dev(dev);
278 struct drm_vc4_wait_seqno *args = data;
279
280 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
281 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
282 return -ENODEV;
283
284 return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
285 @@ -1124,7 +1124,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev
286 struct drm_gem_object *gem_obj;
287 struct vc4_bo *bo;
288
289 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
290 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
291 return -ENODEV;
292
293 if (args->pad != 0)
294 @@ -1173,7 +1173,7 @@ vc4_submit_cl_ioctl(struct drm_device *d
295 args->shader_rec_size,
296 args->bo_handle_count);
297
298 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
299 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
300 return -ENODEV;
301
302 if (!vc4->v3d) {
303 @@ -1310,7 +1310,7 @@ int vc4_gem_init(struct drm_device *dev)
304 struct vc4_dev *vc4 = to_vc4_dev(dev);
305 int ret;
306
307 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
308 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
309 return -ENODEV;
310
311 vc4->dma_fence_context = dma_fence_context_alloc(1);
312 @@ -1369,7 +1369,7 @@ int vc4_gem_madvise_ioctl(struct drm_dev
313 struct vc4_bo *bo;
314 int ret;
315
316 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
317 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
318 return -ENODEV;
319
320 switch (args->madv) {
321 --- a/drivers/gpu/drm/vc4/vc4_irq.c
322 +++ b/drivers/gpu/drm/vc4/vc4_irq.c
323 @@ -265,7 +265,7 @@ vc4_irq_enable(struct drm_device *dev)
324 {
325 struct vc4_dev *vc4 = to_vc4_dev(dev);
326
327 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
328 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
329 return;
330
331 if (!vc4->v3d)
332 @@ -282,7 +282,7 @@ vc4_irq_disable(struct drm_device *dev)
333 {
334 struct vc4_dev *vc4 = to_vc4_dev(dev);
335
336 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
337 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
338 return;
339
340 if (!vc4->v3d)
341 @@ -305,7 +305,7 @@ int vc4_irq_install(struct drm_device *d
342 struct vc4_dev *vc4 = to_vc4_dev(dev);
343 int ret;
344
345 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
346 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
347 return -ENODEV;
348
349 if (irq == IRQ_NOTCONNECTED)
350 @@ -326,7 +326,7 @@ void vc4_irq_uninstall(struct drm_device
351 {
352 struct vc4_dev *vc4 = to_vc4_dev(dev);
353
354 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
355 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
356 return;
357
358 vc4_irq_disable(dev);
359 @@ -339,7 +339,7 @@ void vc4_irq_reset(struct drm_device *de
360 struct vc4_dev *vc4 = to_vc4_dev(dev);
361 unsigned long irqflags;
362
363 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
364 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
365 return;
366
367 /* Acknowledge any stale IRQs. */
368 --- a/drivers/gpu/drm/vc4/vc4_kms.c
369 +++ b/drivers/gpu/drm/vc4/vc4_kms.c
370 @@ -482,7 +482,7 @@ static struct drm_framebuffer *vc4_fb_cr
371 struct vc4_dev *vc4 = to_vc4_dev(dev);
372 struct drm_mode_fb_cmd2 mode_cmd_local;
373
374 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
375 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
376 return ERR_PTR(-ENODEV);
377
378 /* If the user didn't specify a modifier, use the
379 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c
380 +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
381 @@ -23,7 +23,7 @@ void vc4_perfmon_get(struct vc4_perfmon
382 return;
383
384 vc4 = perfmon->dev;
385 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
386 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
387 return;
388
389 refcount_inc(&perfmon->refcnt);
390 @@ -37,7 +37,7 @@ void vc4_perfmon_put(struct vc4_perfmon
391 return;
392
393 vc4 = perfmon->dev;
394 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
395 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
396 return;
397
398 if (refcount_dec_and_test(&perfmon->refcnt))
399 @@ -49,7 +49,7 @@ void vc4_perfmon_start(struct vc4_dev *v
400 unsigned int i;
401 u32 mask;
402
403 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
404 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
405 return;
406
407 if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
408 @@ -69,7 +69,7 @@ void vc4_perfmon_stop(struct vc4_dev *vc
409 {
410 unsigned int i;
411
412 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
413 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
414 return;
415
416 if (WARN_ON_ONCE(!vc4->active_perfmon ||
417 @@ -90,7 +90,7 @@ struct vc4_perfmon *vc4_perfmon_find(str
418 struct vc4_dev *vc4 = vc4file->dev;
419 struct vc4_perfmon *perfmon;
420
421 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
422 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
423 return NULL;
424
425 mutex_lock(&vc4file->perfmon.lock);
426 @@ -105,7 +105,7 @@ void vc4_perfmon_open_file(struct vc4_fi
427 {
428 struct vc4_dev *vc4 = vc4file->dev;
429
430 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
431 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
432 return;
433
434 mutex_init(&vc4file->perfmon.lock);
435 @@ -126,7 +126,7 @@ void vc4_perfmon_close_file(struct vc4_f
436 {
437 struct vc4_dev *vc4 = vc4file->dev;
438
439 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
440 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
441 return;
442
443 mutex_lock(&vc4file->perfmon.lock);
444 @@ -146,7 +146,7 @@ int vc4_perfmon_create_ioctl(struct drm_
445 unsigned int i;
446 int ret;
447
448 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
449 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
450 return -ENODEV;
451
452 if (!vc4->v3d) {
453 @@ -200,7 +200,7 @@ int vc4_perfmon_destroy_ioctl(struct drm
454 struct drm_vc4_perfmon_destroy *req = data;
455 struct vc4_perfmon *perfmon;
456
457 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
458 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
459 return -ENODEV;
460
461 if (!vc4->v3d) {
462 @@ -228,7 +228,7 @@ int vc4_perfmon_get_values_ioctl(struct
463 struct vc4_perfmon *perfmon;
464 int ret;
465
466 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
467 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
468 return -ENODEV;
469
470 if (!vc4->v3d) {
471 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c
472 +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
473 @@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev,
474 bool has_bin = args->bin_cl_size != 0;
475 int ret;
476
477 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
478 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
479 return -ENODEV;
480
481 if (args->min_x_tile > args->max_x_tile ||
482 --- a/drivers/gpu/drm/vc4/vc4_v3d.c
483 +++ b/drivers/gpu/drm/vc4/vc4_v3d.c
484 @@ -127,7 +127,7 @@ static int vc4_v3d_debugfs_ident(struct
485 int
486 vc4_v3d_pm_get(struct vc4_dev *vc4)
487 {
488 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
489 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
490 return -ENODEV;
491
492 mutex_lock(&vc4->power_lock);
493 @@ -148,7 +148,7 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
494 void
495 vc4_v3d_pm_put(struct vc4_dev *vc4)
496 {
497 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
498 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
499 return;
500
501 mutex_lock(&vc4->power_lock);
502 @@ -178,7 +178,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev
503 uint64_t seqno = 0;
504 struct vc4_exec_info *exec;
505
506 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
507 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
508 return -ENODEV;
509
510 try_again:
511 @@ -325,7 +325,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *v
512 {
513 int ret = 0;
514
515 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
516 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
517 return -ENODEV;
518
519 mutex_lock(&vc4->bin_bo_lock);
520 @@ -360,7 +360,7 @@ static void bin_bo_release(struct kref *
521
522 void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
523 {
524 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
525 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
526 return;
527
528 mutex_lock(&vc4->bin_bo_lock);
529 --- a/drivers/gpu/drm/vc4/vc4_validate.c
530 +++ b/drivers/gpu/drm/vc4/vc4_validate.c
531 @@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, u
532 struct drm_gem_dma_object *obj;
533 struct vc4_bo *bo;
534
535 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
536 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
537 return NULL;
538
539 if (hindex >= exec->bo_count) {
540 @@ -169,7 +169,7 @@ vc4_check_tex_size(struct vc4_exec_info
541 uint32_t utile_w = utile_width(cpp);
542 uint32_t utile_h = utile_height(cpp);
543
544 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
545 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
546 return false;
547
548 /* The shaded vertex format stores signed 12.4 fixed point
549 @@ -495,7 +495,7 @@ vc4_validate_bin_cl(struct drm_device *d
550 uint32_t dst_offset = 0;
551 uint32_t src_offset = 0;
552
553 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
554 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
555 return -ENODEV;
556
557 while (src_offset < len) {
558 @@ -942,7 +942,7 @@ vc4_validate_shader_recs(struct drm_devi
559 uint32_t i;
560 int ret = 0;
561
562 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
563 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
564 return -ENODEV;
565
566 for (i = 0; i < exec->shader_state_count; i++) {
567 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
568 +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
569 @@ -786,7 +786,7 @@ vc4_validate_shader(struct drm_gem_dma_o
570 struct vc4_validated_shader_info *validated_shader = NULL;
571 struct vc4_shader_validation_state validation_state;
572
573 - if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
574 + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
575 return NULL;
576
577 memset(&validation_state, 0, sizeof(validation_state));