5b6bf5abcbafb545948acd2f92b47f1d2bd9d4bb
[openwrt/staging/stintel.git] /
1 From 8dd72bdbb1e3f0061f2e4a9bb4f6fce0966585a6 Mon Sep 17 00:00:00 2001
2 From: Luo Jie <quic_luoj@quicinc.com>
3 Date: Wed, 27 Dec 2023 13:13:46 +0800
4 Subject: [PATCH 22/50] net: ethernet: qualcomm: Initialize PPE queue settings
5
6 Configure unicast and multicast hardware queues to forward
7 the traffic between PPE ports.
8
9 Each PPE port is assigned with the specific queue resource,
10 and the egress queue ID is decided by the priority and the
11 RSS hash value of packet.
12
13 Change-Id: I3e4d4e12548a12b11f129106678375cc3b58828d
14 Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
15 ---
16 drivers/net/ethernet/qualcomm/ppe/ppe_api.c | 44 +++
17 drivers/net/ethernet/qualcomm/ppe/ppe_api.h | 34 ++
18 .../net/ethernet/qualcomm/ppe/ppe_config.c | 362 +++++++++++++++++-
19 .../net/ethernet/qualcomm/ppe/ppe_config.h | 41 ++
20 drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 16 +
21 5 files changed, 496 insertions(+), 1 deletion(-)
22
23 diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
24 index ba35fc151e2c..72d416e0ca44 100644
25 --- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
26 +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.c
27 @@ -38,3 +38,47 @@ int ppe_queue_priority_set(struct ppe_device *ppe_dev,
28
29 return ppe_queue_scheduler_set(ppe_dev, node_id, level, port, sch_cfg);
30 }
31 +
32 +/**
33 + * ppe_edma_queue_offset_config - Configure queue offset for EDMA interface
34 + * @ppe_dev: PPE device
35 + * @class: The class to configure queue offset
36 + * @index: Class index, internal priority or hash value
37 + * @queue_offset: Queue offset value
38 + *
39 + * PPE EDMA queue offset is configured based on the PPE internal priority or
40 + * RSS hash value, the profile ID is fixed to 0 for EDMA interface.
41 + *
42 + * Return 0 on success, negative error code on failure.
43 + */
44 +int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
45 + enum ppe_queue_class_type class,
46 + int index, int queue_offset)
47 +{
48 + if (class == PPE_QUEUE_CLASS_PRIORITY)
49 + return ppe_queue_ucast_pri_class_set(ppe_dev, 0,
50 + index, queue_offset);
51 +
52 + return ppe_queue_ucast_hash_class_set(ppe_dev, 0,
53 + index, queue_offset);
54 +}
55 +
56 +/**
57 + * ppe_edma_queue_resource_get - Get EDMA queue resource
58 + * @ppe_dev: PPE device
59 + * @type: Resource type
60 + * @res_start: Resource start ID returned
61 + * @res_end: Resource end ID returned
62 + *
63 + * PPE EDMA queue resource includes unicast queue and multicast queue.
64 + *
65 + * Return 0 on success, negative error code on failure.
66 + */
67 +int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
68 + int *res_start, int *res_end)
69 +{
70 + if (type != PPE_RES_UCAST && type != PPE_RES_MCAST)
71 + return -EINVAL;
72 +
73 + return ppe_port_resource_get(ppe_dev, 0, type, res_start, res_end);
74 +}
75 diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
76 index ee5b47d06991..c8aa8945f90f 100644
77 --- a/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
78 +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_api.h
79 @@ -15,7 +15,41 @@
80 #define PPE_QUEUE_ID_NUM 300
81 #define PPE_FLOW_ID_NUM 64
82 #define PPE_QUEUE_SCH_PRI_NUM 8
83 +#define PPE_QUEUE_INTER_PRI_NUM 16
84 +#define PPE_QUEUE_HASH_NUM 256
85 +
86 +/**
87 + * enum ppe_queue_class_type - PPE queue class type
88 + * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
89 + * @PPE_QUEUE_CLASS_HASH: Queue offset configured from RSS hash.
90 + */
91 +enum ppe_queue_class_type {
92 + PPE_QUEUE_CLASS_PRIORITY,
93 + PPE_QUEUE_CLASS_HASH,
94 +};
95 +
96 +/**
97 + * enum ppe_resource_type - PPE resource type
98 + * @PPE_RES_UCAST: Unicast queue resource
99 + * @PPE_RES_MCAST: Multicast queue resource
100 + * @PPE_RES_FLOW_ID: Flow resource
101 + * @PPE_RES_L0_NODE: Level 0 QoS node resource
102 + * @PPE_RES_L1_NODE: Level 1 QoS node resource
103 + */
104 +enum ppe_resource_type {
105 + PPE_RES_UCAST,
106 + PPE_RES_MCAST,
107 + PPE_RES_FLOW_ID,
108 + PPE_RES_L0_NODE,
109 + PPE_RES_L1_NODE,
110 +};
111
112 int ppe_queue_priority_set(struct ppe_device *ppe_dev,
113 int queue_id, int priority);
114 +
115 +int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
116 + enum ppe_queue_class_type class,
117 + int index, int queue_offset);
118 +int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
119 + int *res_start, int *res_end);
120 #endif
121 diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
122 index bdef26da6fd3..ac90d33aecba 100644
123 --- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
124 +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
125 @@ -119,6 +119,34 @@ struct ppe_port_schedule_config {
126 unsigned int drr_node_id;
127 };
128
129 +/**
130 + * struct ppe_port_schedule_resource - PPE port scheduler resource.
131 + * @ucastq_start: Unicast queue start ID.
132 + * @ucastq_end: Unicast queue end ID.
133 + * @mcastq_start: Multicast queue start ID.
134 + * @mcastq_end: Multicast queue end ID.
135 + * @flow_id_start: Flow start ID.
136 + * @flow_id_end: Flow end ID.
137 + * @l0node_start: Scheduler node start ID for queue level.
138 + * @l0node_end: Scheduler node end ID for queue level.
139 + * @l1node_start: Scheduler node start ID for flow level.
140 + * @l1node_end: Scheduler node end ID for flow level.
141 + *
142 + * PPE scheduler resource allocated among the PPE ports.
143 + */
144 +struct ppe_port_schedule_resource {
145 + unsigned int ucastq_start;
146 + unsigned int ucastq_end;
147 + unsigned int mcastq_start;
148 + unsigned int mcastq_end;
149 + unsigned int flow_id_start;
150 + unsigned int flow_id_end;
151 + unsigned int l0node_start;
152 + unsigned int l0node_end;
153 + unsigned int l1node_start;
154 + unsigned int l1node_end;
155 +};
156 +
157 static int ipq9574_ppe_bm_group_config = 1550;
158 static struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
159 {
160 @@ -648,6 +676,111 @@ static struct ppe_port_schedule_config ppe_qos_schedule_config[] = {
161 },
162 };
163
164 +/* The QoS resource is applied to each PPE port, there are some
165 + * resource reserved as the last one.
166 + */
167 +static struct ppe_port_schedule_resource ppe_scheduler_res[] = {
168 + { .ucastq_start = 0,
169 + .ucastq_end = 63,
170 + .mcastq_start = 256,
171 +		.mcastq_end	= 271,
172 + .flow_id_start = 0,
173 + .flow_id_end = 0,
174 + .l0node_start = 0,
175 + .l0node_end = 7,
176 + .l1node_start = 0,
177 + .l1node_end = 0,
178 + },
179 + { .ucastq_start = 144,
180 + .ucastq_end = 159,
181 + .mcastq_start = 272,
182 +		.mcastq_end	= 275,
183 + .flow_id_start = 36,
184 + .flow_id_end = 39,
185 + .l0node_start = 48,
186 + .l0node_end = 63,
187 + .l1node_start = 8,
188 + .l1node_end = 11,
189 + },
190 + { .ucastq_start = 160,
191 + .ucastq_end = 175,
192 + .mcastq_start = 276,
193 +		.mcastq_end	= 279,
194 + .flow_id_start = 40,
195 + .flow_id_end = 43,
196 + .l0node_start = 64,
197 + .l0node_end = 79,
198 + .l1node_start = 12,
199 + .l1node_end = 15,
200 + },
201 + { .ucastq_start = 176,
202 + .ucastq_end = 191,
203 + .mcastq_start = 280,
204 +		.mcastq_end	= 283,
205 + .flow_id_start = 44,
206 + .flow_id_end = 47,
207 + .l0node_start = 80,
208 + .l0node_end = 95,
209 + .l1node_start = 16,
210 + .l1node_end = 19,
211 + },
212 + { .ucastq_start = 192,
213 + .ucastq_end = 207,
214 + .mcastq_start = 284,
215 +		.mcastq_end	= 287,
216 + .flow_id_start = 48,
217 + .flow_id_end = 51,
218 + .l0node_start = 96,
219 + .l0node_end = 111,
220 + .l1node_start = 20,
221 + .l1node_end = 23,
222 + },
223 + { .ucastq_start = 208,
224 + .ucastq_end = 223,
225 + .mcastq_start = 288,
226 +		.mcastq_end	= 291,
227 + .flow_id_start = 52,
228 + .flow_id_end = 55,
229 + .l0node_start = 112,
230 + .l0node_end = 127,
231 + .l1node_start = 24,
232 + .l1node_end = 27,
233 + },
234 + { .ucastq_start = 224,
235 + .ucastq_end = 239,
236 + .mcastq_start = 292,
237 +		.mcastq_end	= 295,
238 + .flow_id_start = 56,
239 + .flow_id_end = 59,
240 + .l0node_start = 128,
241 + .l0node_end = 143,
242 + .l1node_start = 28,
243 + .l1node_end = 31,
244 + },
245 + { .ucastq_start = 240,
246 + .ucastq_end = 255,
247 + .mcastq_start = 296,
248 +		.mcastq_end	= 299,
249 + .flow_id_start = 60,
250 + .flow_id_end = 63,
251 + .l0node_start = 144,
252 + .l0node_end = 159,
253 + .l1node_start = 32,
254 + .l1node_end = 35,
255 + },
256 + { .ucastq_start = 64,
257 + .ucastq_end = 143,
258 + .mcastq_start = 0,
259 +		.mcastq_end	= 0,
260 + .flow_id_start = 1,
261 + .flow_id_end = 35,
262 + .l0node_start = 8,
263 + .l0node_end = 47,
264 + .l1node_start = 1,
265 + .l1node_end = 7,
266 + },
267 +};
268 +
269 /* Set the first level scheduler configuration. */
270 static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
271 int node_id, int port,
272 @@ -893,6 +1026,147 @@ int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
273 port, scheduler_cfg);
274 }
275
276 +/**
277 + * ppe_queue_ucast_base_set - Set PPE unicast queue base ID
278 + * @ppe_dev: PPE device
279 + * @queue_dst: PPE queue destination configuration
280 + * @queue_base: PPE queue base ID
281 + * @profile_id: Profile ID
282 + *
283 + * The PPE unicast queue base ID is configured based on the destination
284 + * port information per profile ID.
285 + *
286 + * Return 0 on success, negative error code on failure.
287 + */
288 +int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
289 + struct ppe_queue_ucast_dest queue_dst,
290 + int queue_base, int profile_id)
291 +{
292 + int index, profile_size;
293 + u32 val, reg;
294 +
295 + profile_size = queue_dst.src_profile << 8;
296 + if (queue_dst.service_code_en)
297 + index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
298 + queue_dst.service_code;
299 + else if (queue_dst.cpu_code_en)
300 + index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
301 + queue_dst.cpu_code;
302 + else
303 + index = profile_size + queue_dst.dest_port;
304 +
305 + val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
306 + val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
307 + reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
308 +
309 + return regmap_write(ppe_dev->regmap, reg, val);
310 +}
311 +
312 +/**
313 + * ppe_queue_ucast_pri_class_set - Set PPE unicast queue class of priority
314 + * @ppe_dev: PPE device
315 + * @profile_id: Profile ID
316 + * @priority: Priority to be used to set class
317 + * @class_offset: Class value for the destination queue ID
318 + *
319 + * The PPE unicast queue class is configured based on the PPE
320 + * internal priority.
321 + *
322 + * Return 0 on success, negative error code on failure.
323 + */
324 +int ppe_queue_ucast_pri_class_set(struct ppe_device *ppe_dev,
325 + int profile_id,
326 + int priority,
327 + int class_offset)
328 +{
329 + u32 val, reg;
330 + int index;
331 +
332 + index = (profile_id << 4) + priority;
333 + val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, class_offset);
334 + reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
335 +
336 + return regmap_write(ppe_dev->regmap, reg, val);
337 +}
338 +
339 +/**
340 + * ppe_queue_ucast_hash_class_set - Set PPE unicast queue class of hash value
341 + * @ppe_dev: PPE device
342 + * @profile_id: Profile ID
343 + * @rss_hash: Hash value to be used to set class
344 + * @class_offset: Class value for the destination queue ID
345 + *
346 + * The PPE unicast queue class is configured based on the RSS hash value.
347 + *
348 + * Return 0 on success, negative error code on failure.
349 + */
350 +int ppe_queue_ucast_hash_class_set(struct ppe_device *ppe_dev,
351 + int profile_id,
352 + int rss_hash,
353 + int class_offset)
354 +{
355 + u32 val, reg;
356 + int index;
357 +
358 + index = (profile_id << 8) + rss_hash;
359 + val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, class_offset);
360 + reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
361 +
362 + return regmap_write(ppe_dev->regmap, reg, val);
363 +}
364 +
365 +/**
366 + * ppe_port_resource_get - Get PPE resource per port
367 + * @ppe_dev: PPE device
368 + * @port: PPE port
369 + * @type: Resource type
370 + * @res_start: Resource start ID
371 + * @res_end: Resource end ID
372 + *
373 + * PPE resource is assigned per PPE port, which is acquired for QoS scheduler.
374 + *
375 + * Return 0 on success, negative error code on failure.
376 + */
377 +int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
378 + int *res_start, int *res_end)
379 +{
380 + struct ppe_port_schedule_resource res;
381 +
382 + /* The reserved resource with the maximum port ID of PPE is
383 + * also allowed to be acquired.
384 + */
385 + if (port > ppe_dev->num_ports)
386 + return -EINVAL;
387 +
388 + res = ppe_scheduler_res[port];
389 + switch (type) {
390 + case PPE_RES_UCAST:
391 + *res_start = res.ucastq_start;
392 + *res_end = res.ucastq_end;
393 + break;
394 + case PPE_RES_MCAST:
395 + *res_start = res.mcastq_start;
396 + *res_end = res.mcastq_end;
397 + break;
398 + case PPE_RES_FLOW_ID:
399 + *res_start = res.flow_id_start;
400 + *res_end = res.flow_id_end;
401 + break;
402 + case PPE_RES_L0_NODE:
403 + *res_start = res.l0node_start;
404 + *res_end = res.l0node_end;
405 + break;
406 + case PPE_RES_L1_NODE:
407 + *res_start = res.l1node_start;
408 + *res_end = res.l1node_end;
409 + break;
410 + default:
411 + return -EINVAL;
412 + }
413 +
414 + return 0;
415 +}
416 +
417 static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
418 struct ppe_bm_port_config port_cfg)
419 {
420 @@ -1219,6 +1493,88 @@ static int ppe_config_scheduler(struct ppe_device *ppe_dev)
421 return ret;
422 };
423
424 +/* Configure PPE queue destination of each PPE port. */
425 +static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
426 +{
427 + int ret, port_id, index, class, res_start, res_end, queue_base, pri_max;
428 + struct ppe_queue_ucast_dest queue_dst;
429 +
430 + for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
431 + memset(&queue_dst, 0, sizeof(queue_dst));
432 +
433 + ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
434 + &res_start, &res_end);
435 + if (ret)
436 + return ret;
437 +
438 + queue_base = res_start;
439 + queue_dst.dest_port = port_id;
440 +
441 + /* Configure queue base ID and profile ID that is same as
442 + * physical port ID.
443 + */
444 + ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
445 + queue_base, port_id);
446 + if (ret)
447 + return ret;
448 +
449 + /* Queue priority range supported by each PPE port */
450 + ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
451 + &res_start, &res_end);
452 + if (ret)
453 + return ret;
454 +
455 + pri_max = res_end - res_start;
456 +
457 + /* Redirect ARP reply packet with the max priority on CPU port,
458 + * which keeps the ARP reply directed to CPU (CPU code is 101)
459 + * with highest priority received by EDMA when there is a heavy
460 + * traffic loaded.
461 + */
462 + if (port_id == 0) {
463 + memset(&queue_dst, 0, sizeof(queue_dst));
464 +
465 + queue_dst.cpu_code_en = true;
466 + queue_dst.cpu_code = 101;
467 + ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
468 + queue_base + pri_max,
469 + 0);
470 + if (ret)
471 + return ret;
472 + }
473 +
474 + /* Initialize the class offset of internal priority. */
475 + for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
476 + class = index > pri_max ? pri_max : index;
477 +
478 + ret = ppe_queue_ucast_pri_class_set(ppe_dev, port_id,
479 + index, class);
480 + if (ret)
481 + return ret;
482 + }
483 +
484 + /* Initialize the class offset of RSS hash as 0 to avoid the
485 + * random hardware value that will lead to the unexpected
486 + * destination queue generated.
487 + */
488 + index = 0;
489 + for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
490 + ret = ppe_queue_ucast_hash_class_set(ppe_dev, port_id,
491 + index, 0);
492 + if (ret)
493 + return ret;
494 + }
495 + }
496 +
497 + return 0;
498 +}
499 +
500 +/* Initialize PPE device to handle traffic correctly. */
501 +static int ppe_dev_hw_init(struct ppe_device *ppe_dev)
502 +{
503 + return ppe_queue_dest_init(ppe_dev);
504 +}
505 +
506 int ppe_hw_config(struct ppe_device *ppe_dev)
507 {
508 int ret;
509 @@ -1231,5 +1587,9 @@ int ppe_hw_config(struct ppe_device *ppe_dev)
510 if (ret)
511 return ret;
512
513 - return ppe_config_scheduler(ppe_dev);
514 + ret = ppe_config_scheduler(ppe_dev);
515 + if (ret)
516 + return ret;
517 +
518 + return ppe_dev_hw_init(ppe_dev);
519 }
520 diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
521 index 167a114031fd..676c4ec45f6f 100644
522 --- a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
523 +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
524 @@ -6,6 +6,13 @@
525 #ifndef __PPE_CONFIG_H__
526 #define __PPE_CONFIG_H__
527
528 +/* There are different queue config ranges for the destination port,
529 + * CPU code and service code.
530 + */
531 +#define PPE_QUEUE_BASE_DEST_PORT 0
532 +#define PPE_QUEUE_BASE_CPU_CODE 1024
533 +#define PPE_QUEUE_BASE_SERVICE_CODE 2048
534 +
535 /**
536 * struct ppe_qos_scheduler_cfg - PPE QoS scheduler configuration.
537 * @flow_id: PPE flow ID.
538 @@ -26,6 +33,26 @@ struct ppe_qos_scheduler_cfg {
539 int node_frame_mode;
540 };
541
542 +/**
543 + * struct ppe_queue_ucast_dest - PPE unicast queue destination.
544 + * @src_profile: Source profile.
545 + * @service_code_en: Enable service code.
546 + * @service_code: Service code.
547 + * @cpu_code_en: Enable CPU code.
548 + * @cpu_code: CPU code.
549 + * @dest_port: destination port.
550 + *
551 + * PPE egress queue ID is decided by the egress port ID.
552 + */
553 +struct ppe_queue_ucast_dest {
554 + int src_profile;
555 + bool service_code_en;
556 + int service_code;
557 + bool cpu_code_en;
558 + int cpu_code;
559 + int dest_port;
560 +};
561 +
562 int ppe_hw_config(struct ppe_device *ppe_dev);
563 int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
564 int node_id, bool flow_level, int port,
565 @@ -33,4 +60,18 @@ int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
566 int ppe_queue_scheduler_get(struct ppe_device *ppe_dev,
567 int node_id, bool flow_level, int *port,
568 struct ppe_qos_scheduler_cfg *scheduler_cfg);
569 +int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
570 + struct ppe_queue_ucast_dest queue_dst,
571 + int queue_base,
572 + int profile_id);
573 +int ppe_queue_ucast_pri_class_set(struct ppe_device *ppe_dev,
574 + int profile_id,
575 + int priority,
576 + int class_offset);
577 +int ppe_queue_ucast_hash_class_set(struct ppe_device *ppe_dev,
578 + int profile_id,
579 + int rss_hash,
580 + int class_offset);
581 +int ppe_port_resource_get(struct ppe_device *ppe_dev, int port, int type,
582 + int *res_start, int *res_end);
583 #endif
584 diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
585 index 0279f1023bcf..1f6828237f94 100644
586 --- a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
587 +++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
588 @@ -153,6 +153,22 @@
589 #define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
590 u32p_replace_bits((u32 *)(tbl_cfg) + 0x1, value, PPE_BM_PORT_FC_W1_PRE_ALLOC)
591
592 +#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
593 +#define PPE_UCAST_QUEUE_MAP_TBL_NUM 3072
594 +#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
595 +#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
596 +#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
597 +
598 +#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
599 +#define PPE_UCAST_HASH_MAP_TBL_NUM 4096
600 +#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
601 +#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
602 +
603 +#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
604 +#define PPE_UCAST_PRIORITY_MAP_TBL_NUM 256
605 +#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
606 +#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
607 +
608 /* PPE unicast queue (0-255) configurations. */
609 #define PPE_AC_UNI_QUEUE_CFG_TBL_ADDR 0x848000
610 #define PPE_AC_UNI_QUEUE_CFG_TBL_NUM 256
611 --
612 2.45.2
613