From 8464c7b6e7c491ae06b18103881611c98678cf1f Mon Sep 17 00:00:00 2001
From: Haiying Wang <Haiying.Wang@nxp.com>
Date: Thu, 13 Apr 2017 14:54:01 -0400
Subject: [PATCH] soc: fsl: dpio: enable qbman CENA portal memory access

Once cacheable portal memory is enabled, we need to flush the cache
for the enqueue, VDQ, buffer release and management commands, as well
as invalidate and prefetch the valid bit of the management command
response and the next index of the DQRR.

Signed-off-by: Haiying Wang <Haiying.Wang@nxp.com>
---
 drivers/soc/fsl/dpio/qbman-portal.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

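For reference, the cache maintenance pattern used below in isolation: clean
the cache line after writing a command, invalidate and prefetch it before
reading the response. This is a minimal sketch assuming an arm64 build and a
64-byte command/response line; the example_* helpers are hypothetical names,
not part of this driver.

#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <linux/types.h>

/* Same cache maintenance instructions as the dccvac()/dcivac() macros
 * added by the patch (arm64 only).
 */
#define example_dccvac(p) asm volatile("dc cvac, %0" : : "r"(p) : "memory")
#define example_dcivac(p) asm volatile("dc ivac, %0" : : "r"(p) : "memory")

/* Write a 64-byte command into cacheable portal memory and hand it to the
 * hardware: payload first, barrier, verb byte last, then clean the line.
 */
static void example_post_command(void *cena_base, u32 offset,
                                 const u8 *cmd, u8 verb)
{
        u8 *line = cena_base + offset;

        memcpy(line + 1, cmd + 1, 63);  /* bytes 1..63: command payload */
        dma_wmb();                      /* payload visible before the verb */
        line[0] = verb;                 /* verb byte hands the command over */
        example_dccvac(line);           /* clean so the hardware sees the line */
}

/* Re-read a response the hardware may have updated: invalidate the stale
 * cached copy, then prefetch the fresh contents.
 */
static void example_fetch_response(void *cena_base, u32 offset)
{
        void *line = cena_base + offset;

        example_dcivac(line);           /* drop any stale cached copy */
        prefetch(line);                 /* pull the updated line back in */
}

In the driver itself these operations are only applied on QMan revisions
before 5000, matching the branches touched by the hunks below.
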
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -90,6 +90,14 @@ enum qbman_sdqcr_fc {
         qbman_sdqcr_fc_up_to_3 = 1
 };
 
+#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
+#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
+static inline void qbman_inval_prefetch(struct qbman_swp *p, uint32_t offset)
+{
+        dcivac(p->addr_cena + offset);
+        prefetch(p->addr_cena + offset);
+}
+
 /* Portal Access */
 
 static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
@@ -190,7 +198,7 @@ struct qbman_swp *qbman_swp_init(const s
         memset(p->addr_cena, 0, 64 * 1024);
 
         reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
-                                1, /* Writes Non-cacheable */
+                                0, /* Writes cacheable */
                                 0, /* EQCR_CI stashing threshold */
                                 3, /* RPM: Valid bit mode, RCR in array mode */
                                 2, /* DCM: Discrete consumption ack mode */
@@ -329,6 +337,7 @@ void qbman_swp_mc_submit(struct qbman_sw
         if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                 dma_wmb();
                 *v = cmd_verb | p->mc.valid_bit;
+                dccvac(cmd);
         } else {
                 *v = cmd_verb | p->mc.valid_bit;
                 dma_wmb();
@@ -345,6 +354,7 @@ void *qbman_swp_mc_result(struct qbman_s
         u32 *ret, verb;
 
         if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+                qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                 ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                 /* Remove the valid-bit - command completed if the rest
                  * is non-zero.
@@ -481,6 +491,7 @@ int qbman_swp_enqueue(struct qbman_swp *
                 /* Set the verb byte, have to substitute in the valid-bit */
                 dma_wmb();
                 p->verb = d->verb | EQAR_VB(eqar);
+                dccvac(p);
         } else {
                 p->verb = d->verb | EQAR_VB(eqar);
                 dma_wmb();
@@ -677,6 +688,7 @@ int qbman_swp_pull(struct qbman_swp *s,
                 /* Set the verb byte, have to substitute in the valid-bit */
                 p->verb = d->verb | s->vdq.valid_bit;
                 s->vdq.valid_bit ^= QB_VALID_BIT;
+                dccvac(p);
         } else {
                 p->verb = d->verb | s->vdq.valid_bit;
                 s->vdq.valid_bit ^= QB_VALID_BIT;
@@ -736,8 +748,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
                                  s->dqrr.next_idx, pi);
                         s->dqrr.reset_bug = 0;
                 }
-                prefetch(qbman_get_cmd(s,
-                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+                qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
         }
 
         if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
@@ -755,8 +766,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
          * knew from reading PI.
          */
         if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
-                prefetch(qbman_get_cmd(s,
-                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+                qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
                 return NULL;
         }
         /*
@@ -779,7 +789,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_ne
             (flags & DPAA2_DQ_STAT_EXPIRED))
                 atomic_inc(&s->vdq.available);
 
-        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+        qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
 
         return p;
 }
@@ -911,6 +921,7 @@ int qbman_swp_release(struct qbman_swp *
                  */
                 dma_wmb();
                 p->verb = d->verb | RAR_VB(rar) | num_buffers;
+                dccvac(p);
         } else {
                 p->verb = d->verb | RAR_VB(rar) | num_buffers;
                 dma_wmb();