From b3b779b1060fce4c012e4a0771379dfa2fe5773f Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Thu, 27 Dec 2018 14:04:44 -0800
Subject: [PATCH 584/782] drm/v3d: Rename the fence signaled from IRQs to
 "irq_fence".

We have another thing called the "done fence" that tracks when the
scheduler considers the job done, and having the shared name was
confusing.
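
As a rough illustrative sketch only (not part of the change; the
scheduler-side fence is paraphrased from exec->render_done_fence and is
not a field of v3d_job), the two fences now read along these lines:

    struct v3d_job {
            ...
            /* Signaled by the IRQ handler (FLDONE/FRDONE) once the
             * hardware itself has finished the job.
             */
            struct dma_fence *irq_fence;
            ...
    };

    /* The scheduler's "done" fence (e.g. exec->render_done_fence) is a
     * separate dma_fence, signaled when drm_sched considers the job
     * complete.
     */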

Signed-off-by: Eric Anholt <eric@anholt.net>
---
 drivers/gpu/drm/v3d/v3d_drv.h   |  4 ++--
 drivers/gpu/drm/v3d/v3d_gem.c   |  6 +++---
 drivers/gpu/drm/v3d/v3d_irq.c   |  6 +++---
 drivers/gpu/drm/v3d/v3d_sched.c | 12 ++++++------
 4 files changed, 14 insertions(+), 14 deletions(-)

--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -182,7 +182,7 @@ struct v3d_job {
 	struct dma_fence *in_fence;
 
 	/* v3d fence to be signaled by IRQ handler when the job is complete. */
-	struct dma_fence *done_fence;
+	struct dma_fence *irq_fence;
 
 	/* GPU virtual addresses of the start/end of the CL job. */
 	u32 start, end;
@@ -229,7 +229,7 @@ struct v3d_tfu_job {
 	struct dma_fence *in_fence;
 
 	/* v3d fence to be signaled by IRQ handler when the job is complete. */
-	struct dma_fence *done_fence;
+	struct dma_fence *irq_fence;
 
 	struct v3d_dev *v3d;
 
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -381,8 +381,8 @@ v3d_exec_cleanup(struct kref *ref)
 	dma_fence_put(exec->bin.in_fence);
 	dma_fence_put(exec->render.in_fence);
 
-	dma_fence_put(exec->bin.done_fence);
-	dma_fence_put(exec->render.done_fence);
+	dma_fence_put(exec->bin.irq_fence);
+	dma_fence_put(exec->render.irq_fence);
 
 	dma_fence_put(exec->bin_done_fence);
 	dma_fence_put(exec->render_done_fence);
@@ -411,7 +411,7 @@ v3d_tfu_job_cleanup(struct kref *ref)
 	unsigned int i;
 
 	dma_fence_put(job->in_fence);
-	dma_fence_put(job->done_fence);
+	dma_fence_put(job->irq_fence);
 
 	for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
 		if (job->bo[i])
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -93,7 +93,7 @@ v3d_irq(int irq, void *arg)
 
 	if (intsts & V3D_INT_FLDONE) {
 		struct v3d_fence *fence =
-			to_v3d_fence(v3d->bin_job->bin.done_fence);
+			to_v3d_fence(v3d->bin_job->bin.irq_fence);
 
 		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
 		dma_fence_signal(&fence->base);
@@ -102,7 +102,7 @@ v3d_irq(int irq, void *arg)
 
 	if (intsts & V3D_INT_FRDONE) {
 		struct v3d_fence *fence =
-			to_v3d_fence(v3d->render_job->render.done_fence);
+			to_v3d_fence(v3d->render_job->render.irq_fence);
 
 		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
 		dma_fence_signal(&fence->base);
@@ -138,7 +138,7 @@ v3d_hub_irq(int irq, void *arg)
 
 	if (intsts & V3D_HUB_INT_TFUC) {
 		struct v3d_fence *fence =
-			to_v3d_fence(v3d->tfu_job->done_fence);
+			to_v3d_fence(v3d->tfu_job->irq_fence);
 
 		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
 		dma_fence_signal(&fence->base);
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -152,9 +152,9 @@ static struct dma_fence *v3d_job_run(str
 	if (IS_ERR(fence))
 		return NULL;
 
-	if (job->done_fence)
-		dma_fence_put(job->done_fence);
-	job->done_fence = dma_fence_get(fence);
+	if (job->irq_fence)
+		dma_fence_put(job->irq_fence);
+	job->irq_fence = dma_fence_get(fence);
 
 	trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
 			    job->start, job->end);
@@ -195,9 +195,9 @@ v3d_tfu_job_run(struct drm_sched_job *sc
 		return NULL;
 
 	v3d->tfu_job = job;
-	if (job->done_fence)
-		dma_fence_put(job->done_fence);
-	job->done_fence = dma_fence_get(fence);
+	if (job->irq_fence)
+		dma_fence_put(job->irq_fence);
+	job->irq_fence = dma_fence_get(fence);
 
 	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
 