From 2cd5485663847d468dc207b3ff85fb1fab44d97f Mon Sep 17 00:00:00 2001
From: Ansuel Smith <ansuelsmth@gmail.com>
Date: Wed, 2 Feb 2022 01:03:31 +0100
Subject: [PATCH 12/16] net: dsa: qca8k: add support for phy read/write with
 mgmt Ethernet

Use mgmt Ethernet also for phy read/write if available. Use a different
seq number to make sure we receive the correct packet.
On any error, we fall back to the legacy mdio read/write.
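
The read path then follows roughly this pattern (a sketch only, with the
success check simplified):

        /* Try the mgmt Ethernet path first */
        ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
        if (ret >= 0)
                return ret;

        /* mgmt master not available or request failed: use legacy MDIO */
        return qca8k_mdio_read(bus, phy, regnum);
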
Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/dsa/qca8k.c | 216 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/dsa/qca8k.h |   1 +
 2 files changed, 217 insertions(+)

diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 199cf4f761c0..0ce5b7ca0b7f 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -867,6 +867,199 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
                 regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+                        struct sk_buff *read_skb, u32 *val)
+        struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+        reinit_completion(&mgmt_eth_data->rw_done);
+        /* Increment seq_num and set it in the copy pkt */
+        mgmt_eth_data->seq++;
+        qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+        mgmt_eth_data->ack = false;
+        dev_queue_xmit(skb);
+        ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                          QCA8K_ETHERNET_TIMEOUT);
+        ack = mgmt_eth_data->ack;
+        *val = mgmt_eth_data->data[0];
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+                      int regnum, u16 data)
+        struct sk_buff *write_skb, *clear_skb, *read_skb;
+        struct qca8k_mgmt_eth_data *mgmt_eth_data;
+        u32 write_val, clear_val = 0, val;
+        struct net_device *mgmt_master;
+        if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+        mgmt_eth_data = &priv->mgmt_eth_data;
+        write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+                    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+                    QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+        write_val |= QCA8K_MDIO_MASTER_READ;
+        write_val |= QCA8K_MDIO_MASTER_WRITE;
+        write_val |= QCA8K_MDIO_MASTER_DATA(data);
+        /* Prealloc all the needed skb before the lock */
+        write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL,
+                                            &write_val, QCA8K_ETHERNET_PHY_PRIORITY);
+        clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL,
+                                            &clear_val, QCA8K_ETHERNET_PHY_PRIORITY);
+        read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL,
+                                           &clear_val, QCA8K_ETHERNET_PHY_PRIORITY);
+        /* Actually start the request:
+         * 1. Send mdio master packet
+         * 2. Busy Wait for mdio master command
+         * 3. Get the data if we are reading
+         * 4. Reset the mdio master (even with error)
+        mutex_lock(&mgmt_eth_data->mutex);
+        /* Check if mgmt_master is operational */
+        mgmt_master = priv->mgmt_master;
+        if (!mgmt_master) {
+                mutex_unlock(&mgmt_eth_data->mutex);
+                goto err_mgmt_master;
+        read_skb->dev = mgmt_master;
+        clear_skb->dev = mgmt_master;
+        write_skb->dev = mgmt_master;
+        reinit_completion(&mgmt_eth_data->rw_done);
+        /* Increment seq_num and set it in the write pkt */
+        mgmt_eth_data->seq++;
+        qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+        mgmt_eth_data->ack = false;
+        dev_queue_xmit(write_skb);
+        ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                          QCA8K_ETHERNET_TIMEOUT);
+        ack = mgmt_eth_data->ack;
+                kfree_skb(read_skb);
+                kfree_skb(read_skb);
+        ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+                                !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+                                QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+                                mgmt_eth_data, read_skb, &val);
+        if (ret < 0 && ret1 < 0) {
+        reinit_completion(&mgmt_eth_data->rw_done);
+        /* Increment seq_num and set it in the read pkt */
+        mgmt_eth_data->seq++;
+        qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+        mgmt_eth_data->ack = false;
+        dev_queue_xmit(read_skb);
+        ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                          QCA8K_ETHERNET_TIMEOUT);
+        ack = mgmt_eth_data->ack;
+        ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
+        kfree_skb(read_skb);
+        reinit_completion(&mgmt_eth_data->rw_done);
+        /* Increment seq_num and set it in the clear pkt */
+        mgmt_eth_data->seq++;
+        qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+        mgmt_eth_data->ack = false;
+        dev_queue_xmit(clear_skb);
+        wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+                                    QCA8K_ETHERNET_TIMEOUT);
+        mutex_unlock(&mgmt_eth_data->mutex);
+        /* Error handling before lock */
+        kfree_skb(read_skb);
+        kfree_skb(clear_skb);
+        kfree_skb(write_skb);
 qca8k_port_to_phy(int port)
@@ -989,6 +1182,12 @@ qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 da
         struct qca8k_priv *priv = slave_bus->priv;
         struct mii_bus *bus = priv->bus;
+        /* Use mdio Ethernet when available, fall back to the legacy one on error */
+        ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
         return qca8k_mdio_write(bus, phy, regnum, data);
@@ -998,6 +1197,12 @@ qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
         struct qca8k_priv *priv = slave_bus->priv;
         struct mii_bus *bus = priv->bus;
+        /* Use mdio Ethernet when available, fall back to the legacy one on error */
+        ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
         return qca8k_mdio_read(bus, phy, regnum);
@@ -1006,6 +1211,7 @@ static int
 qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
         struct qca8k_priv *priv = ds->priv;
         /* Check if the legacy mapping should be used and the
          * port is not correctly mapped to the right PHY in the
@@ -1014,6 +1220,11 @@ qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
         if (priv->legacy_phy_port_mapping)
                 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+        /* Use mdio Ethernet when available, fall back to the legacy one on error */
+        ret = qca8k_phy_eth_command(priv, false, port, regnum, data);
         return qca8k_mdio_write(priv->bus, port, regnum, data);
@@ -1030,6 +1241,11 @@ qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
         if (priv->legacy_phy_port_mapping)
                 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+        /* Use mdio Ethernet when available, fall back to the legacy one on error */
+        ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
         ret = qca8k_mdio_read(priv->bus, port, regnum);
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 2d7d084db089..c6f6abd2108e 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
 #include <linux/dsa/tag_qca.h>
 #define QCA8K_ETHERNET_MDIO_PRIORITY   7
+#define QCA8K_ETHERNET_PHY_PRIORITY    6
 #define QCA8K_ETHERNET_TIMEOUT         100
 #define QCA8K_NUM_PORTS                7