From cbe6a244c89bd8826dbf007f8d26d1859f03b381 Mon Sep 17 00:00:00 2001
From: Jonathan Bell <jonathan@raspberrypi.com>
Date: Mon, 13 Dec 2021 15:05:56 +0000
Subject: [PATCH] xhci: refactor out TRBS_PER_SEGMENT define in runtime
 code

In anticipation of adjusting the number of utilised TRBs in a ring
segment, add trbs_per_seg to struct xhci_ring and use this instead
of a compile-time define.
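
Roughly sketched (illustration only, condensed from the hunks below and
not part of the change itself): the new field is set to the existing
compile-time value when a ring is allocated, and is then consulted
wherever a segment's last TRB used to be indexed through the define.

	/* struct xhci_ring gains the per-ring segment size (xhci.h) */
	unsigned int		trbs_per_seg;

	/* xhci_ring_alloc(): default to the current compile-time size */
	ring->trbs_per_seg = TRBS_PER_SEGMENT;

	/* e.g. last_trb_on_seg(): index the final TRB via the ring's value */
	return trb == &seg->trbs[trbs_per_seg - 1];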

Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
---
 drivers/usb/host/xhci-mem.c  | 48 +++++++++++++++++++-----------------
 drivers/usb/host/xhci-ring.c | 20 +++++++++------
 drivers/usb/host/xhci.c      |  6 ++---
 drivers/usb/host/xhci.h      |  1 +
 4 files changed, 42 insertions(+), 33 deletions(-)

--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -98,6 +98,7 @@ static void xhci_free_segments_for_ring(
  */
 static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next,
+			       unsigned int trbs_per_seg,
			       enum xhci_ring_type type, bool chain_links)
 {
 	u32 val;
@@ -106,16 +107,16 @@ static void xhci_link_segments(struct xh
 		return;
 	prev->next = next;
 	if (type != TYPE_EVENT) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
+		prev->trbs[trbs_per_seg - 1].link.segment_ptr =
 			cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
-		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
+		val = le32_to_cpu(prev->trbs[trbs_per_seg - 1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		if (chain_links)
 			val |= TRB_CHAIN;
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
+		prev->trbs[trbs_per_seg - 1].link.control = cpu_to_le32(val);
 	}
 }
 
@@ -139,15 +140,17 @@ static void xhci_link_rings(struct xhci_
 			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
 
 	next = ring->enq_seg->next;
-	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
-	xhci_link_segments(last, next, ring->type, chain_links);
+	xhci_link_segments(ring->enq_seg, first, ring->trbs_per_seg,
+			   ring->type, chain_links);
+	xhci_link_segments(last, next, ring->trbs_per_seg,
+			   ring->type, chain_links);
 	ring->num_segs += num_segs;
-	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+	ring->num_trbs_free += (ring->trbs_per_seg - 1) * num_segs;
 
 	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
-		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
+		ring->last_seg->trbs[ring->trbs_per_seg - 1].link.control
 			&= ~cpu_to_le32(LINK_TOGGLE);
-		last->trbs[TRBS_PER_SEGMENT-1].link.control
+		last->trbs[ring->trbs_per_seg - 1].link.control
 			|= cpu_to_le32(LINK_TOGGLE);
 		ring->last_seg = last;
 	}
@@ -314,14 +317,15 @@ void xhci_initialize_ring_info(struct xh
 	 * Each segment has a link TRB, and leave an extra TRB for SW
 	 * accounting purpose
 	 */
-	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+	ring->num_trbs_free = ring->num_segs * (ring->trbs_per_seg - 1) - 1;
 }
 
 /* Allocate segments and link them for a ring */
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		struct xhci_segment **first, struct xhci_segment **last,
-		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+		unsigned int num_segs, unsigned int trbs_per_seg,
+		unsigned int cycle_state, enum xhci_ring_type type,
+		unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_segment *prev;
 	bool chain_links;
@@ -350,12 +354,12 @@ static int xhci_alloc_segments_for_ring(
 			}
 			return -ENOMEM;
 		}
-		xhci_link_segments(prev, next, type, chain_links);
+		xhci_link_segments(prev, next, trbs_per_seg, type, chain_links);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(prev, *first, type, chain_links);
+	xhci_link_segments(prev, *first, trbs_per_seg, type, chain_links);
 	*last = prev;
 
 	return 0;
@@ -387,16 +391,17 @@ struct xhci_ring *xhci_ring_alloc(struct
 	if (num_segs == 0)
 		return ring;
 
+	ring->trbs_per_seg = TRBS_PER_SEGMENT;
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
-			&ring->last_seg, num_segs, cycle_state, type,
-			max_packet, flags);
+			&ring->last_seg, num_segs, ring->trbs_per_seg,
+			cycle_state, type, max_packet, flags);
 	if (ret)
 		goto fail;
 
 	/* Only event ring does not use link TRB */
 	if (type != TYPE_EVENT) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+		ring->last_seg->trbs[ring->trbs_per_seg - 1].link.control |=
 			cpu_to_le32(LINK_TOGGLE);
 	}
 	xhci_initialize_ring_info(ring, cycle_state);
@@ -429,16 +434,15 @@ int xhci_ring_expansion(struct xhci_hcd
 	unsigned int		num_segs_needed;
 	int			ret;
 
-	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
-				(TRBS_PER_SEGMENT - 1);
-
+	num_segs_needed = (num_trbs + (ring->trbs_per_seg - 1) - 1) /
+				(ring->trbs_per_seg - 1);
 	/* Allocate number of segments we needed, or double the ring size */
 	num_segs = ring->num_segs > num_segs_needed ?
 			ring->num_segs : num_segs_needed;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
-			num_segs, ring->cycle_state, ring->type,
-			ring->bounce_buf_len, flags);
+			num_segs, ring->trbs_per_seg, ring->cycle_state,
+			ring->type, ring->bounce_buf_len, flags);
 	if (ret)
 		return -ENOMEM;
 
@@ -1811,7 +1815,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhc
 	for (val = 0; val < evt_ring->num_segs; val++) {
 		entry = &erst->entries[val];
 		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->seg_size = cpu_to_le32(evt_ring->trbs_per_seg);
 		entry->rsvd = 0;
 		seg = seg->next;
 	}
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -90,15 +90,16 @@ static bool trb_is_link(union xhci_trb *
 	return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
-static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
+static bool last_trb_on_seg(struct xhci_segment *seg,
+			    unsigned int trbs_per_seg, union xhci_trb *trb)
 {
-	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
+	return trb == &seg->trbs[trbs_per_seg - 1];
 }
 
 static bool last_trb_on_ring(struct xhci_ring *ring,
 			struct xhci_segment *seg, union xhci_trb *trb)
 {
-	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+	return last_trb_on_seg(seg, ring->trbs_per_seg, trb) && (seg->next == ring->first_seg);
 }
 
 static bool link_trb_toggles_cycle(union xhci_trb *trb)
@@ -161,7 +162,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
 
 	/* event ring doesn't have link trbs, check for last trb */
 	if (ring->type == TYPE_EVENT) {
-		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+		if (!last_trb_on_seg(ring->deq_seg, ring->trbs_per_seg,
+				     ring->dequeue)) {
 			ring->dequeue++;
 			goto out;
 		}
@@ -174,7 +176,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
 
 	/* All other rings have link trbs */
 	if (!trb_is_link(ring->dequeue)) {
-		if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+		if (last_trb_on_seg(ring->deq_seg, ring->trbs_per_seg,
+				    ring->dequeue)) {
 			xhci_warn(xhci, "Missing link TRB at end of segment\n");
 		} else {
 			ring->dequeue++;
@@ -225,7 +228,7 @@ static void inc_enq(struct xhci_hcd *xhc
 	if (!trb_is_link(ring->enqueue))
 		ring->num_trbs_free--;
 
-	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
+	if (last_trb_on_seg(ring->enq_seg, ring->trbs_per_seg, ring->enqueue)) {
 		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
 		return;
 	}
@@ -3153,7 +3156,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd
 	 * that clears the EHB.
 	 */
 	while (xhci_handle_event(xhci) > 0) {
-		if (event_loop++ < TRBS_PER_SEGMENT / 2)
+		if (event_loop++ < xhci->event_ring->trbs_per_seg / 2)
 			continue;
 		xhci_update_erst_dequeue(xhci, event_ring_deq);
 		event_ring_deq = xhci->event_ring->dequeue;
@@ -3295,7 +3298,8 @@ static int prepare_ring(struct xhci_hcd
 		}
 	}
 
-	if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
+	if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->trbs_per_seg,
+			    ep_ring->enqueue)) {
 		xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
 		return -EINVAL;
 	}
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -877,8 +877,8 @@ static void xhci_clear_command_ring(stru
 	seg = ring->deq_seg;
 	do {
 		memset(seg->trbs, 0,
-			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
-		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+			sizeof(union xhci_trb) * (ring->trbs_per_seg - 1));
+		seg->trbs[ring->trbs_per_seg - 1].link.control &=
 			cpu_to_le32(~TRB_CYCLE);
 		seg = seg->next;
 	} while (seg != ring->deq_seg);
@@ -889,7 +889,7 @@ static void xhci_clear_command_ring(stru
 	ring->enq_seg = ring->deq_seg;
 	ring->enqueue = ring->dequeue;
 
-	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+	ring->num_trbs_free = ring->num_segs * (ring->trbs_per_seg - 1) - 1;
 	/*
 	 * Ring is now zeroed, so the HW should look for change of ownership
 	 * when the cycle bit is set to 1.
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1635,6 +1635,7 @@ struct xhci_ring {
 	unsigned int		num_trbs_free;
 	unsigned int		num_trbs_free_temp;
 	unsigned int		bounce_buf_len;
+	unsigned int		trbs_per_seg;
 	enum xhci_ring_type	type;
 	bool			last_td_was_short;
 	struct radix_tree_root	*trb_address_map;