1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Thu, 23 Mar 2023 11:05:22 +0100
3 Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2 flows
6 For L2 flows, the packet/byte counters should report the sum of the
7 counters of their subflows, both current and expired.
8 In order to make this work, change the way that accounting data is tracked.
9 Reset counters when a flow enters bind. Once it expires (or enters unbind),
10 store the last counter value in struct mtk_flow_entry.
12 Signed-off-by: Felix Fietkau <nbd@nbd.name>
15 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c
16 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
17 @@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
21 - ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
22 - !(val & MTK_PPE_MIB_SER_CR_ST),
23 - 20, MTK_PPE_WAIT_TIMEOUT_US);
24 + ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val,
25 + !(val & MTK_PPE_MIB_SER_CR_ST),
26 + 20, MTK_PPE_WAIT_TIMEOUT_US);
29 dev_err(ppe->dev, "MIB table busy");
30 @@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct
34 -static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
35 +static inline struct mtk_foe_accounting *
36 +mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index)
38 + if (!ppe->acct_table)
41 + return ppe->acct_table + index * sizeof(struct mtk_foe_accounting);
44 +struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index)
46 u32 val, cnt_r0, cnt_r1, cnt_r2;
47 + struct mtk_foe_accounting *acct;
50 val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
51 ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
53 + acct = mtk_ppe_acct_data(ppe, index);
57 ret = mtk_ppe_mib_wait_busy(ppe);
62 cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
63 cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
64 @@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk
65 if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
66 /* 64 bit for each counter */
67 u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
68 - *bytes = ((u64)cnt_r1 << 32) | cnt_r0;
69 - *packets = ((u64)cnt_r3 << 32) | cnt_r2;
70 + acct->bytes += ((u64)cnt_r1 << 32) | cnt_r0;
71 + acct->packets += ((u64)cnt_r3 << 32) | cnt_r2;
73 /* 48 bit byte counter, 40 bit packet counter */
74 u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
75 u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
76 u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
77 u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
78 - *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
79 - *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
80 + acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low;
81 + acct->packets += ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
88 static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
89 @@ -522,14 +536,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
90 hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
92 mtk_ppe_cache_clear(ppe);
94 - if (ppe->accounting) {
95 - struct mtk_foe_accounting *acct;
97 - acct = ppe->acct_table + entry->hash * sizeof(*acct);
102 entry->hash = 0xffff;
104 @@ -554,11 +560,14 @@ static int __mtk_foe_entry_idle_time(str
108 -mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
109 +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
110 + u64 *packets, u64 *bytes)
112 + struct mtk_foe_accounting *acct;
113 struct mtk_foe_entry foe = {};
114 struct mtk_foe_entry *hwe;
115 u16 hash = entry->hash;
120 @@ -569,18 +578,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
121 memcpy(&foe, hwe, len);
123 if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
124 - FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
126 + FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) {
127 + acct = mtk_ppe_acct_data(ppe, hash);
129 + entry->prev_packets += acct->packets;
130 + entry->prev_bytes += acct->bytes;
136 entry->data.ib1 = foe.ib1;
137 + acct = mtk_ppe_mib_entry_read(ppe, hash);
142 + *packets += acct->packets;
143 + *bytes += acct->bytes;
151 mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
153 u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
154 + u64 *packets = &entry->packets;
155 + u64 *bytes = &entry->bytes;
156 struct mtk_flow_entry *cur;
157 struct hlist_node *tmp;
159 @@ -589,7 +615,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
160 hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
163 - if (!mtk_flow_entry_update(ppe, cur)) {
164 + if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) {
165 + entry->prev_packets += cur->prev_packets;
166 + entry->prev_bytes += cur->prev_bytes;
167 __mtk_foe_entry_clear(ppe, entry, false);
170 @@ -604,10 +632,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
174 +void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
177 + entry->packets = entry->prev_packets;
178 + entry->bytes = entry->prev_bytes;
180 + spin_lock_bh(&ppe_lock);
182 + if (entry->type == MTK_FLOW_TYPE_L2)
183 + mtk_flow_entry_update_l2(ppe, entry);
185 + mtk_flow_entry_update(ppe, entry, &entry->packets, &entry->bytes);
187 + *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
189 + spin_unlock_bh(&ppe_lock);
193 __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
196 + struct mtk_foe_accounting *acct;
197 struct mtk_eth *eth = ppe->eth;
198 u16 timestamp = mtk_eth_timestamp(eth);
199 struct mtk_foe_entry *hwe;
200 @@ -638,6 +685,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
204 + acct = mtk_ppe_mib_entry_read(ppe, hash);
210 mtk_ppe_cache_clear(ppe);
213 @@ -802,21 +855,6 @@ void __mtk_ppe_check_skb(struct mtk_ppe
214 spin_unlock_bh(&ppe_lock);
217 -int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
221 - spin_lock_bh(&ppe_lock);
222 - if (entry->type == MTK_FLOW_TYPE_L2)
223 - mtk_flow_entry_update_l2(ppe, entry);
225 - mtk_flow_entry_update(ppe, entry);
226 - idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
227 - spin_unlock_bh(&ppe_lock);
232 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
235 @@ -844,32 +882,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
236 return mtk_ppe_wait_busy(ppe);
239 -struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
240 - struct mtk_foe_accounting *diff)
242 - struct mtk_foe_accounting *acct;
243 - int size = sizeof(struct mtk_foe_accounting);
244 - u64 bytes, packets;
246 - if (!ppe->accounting)
249 - if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
252 - acct = ppe->acct_table + index * size;
254 - acct->bytes += bytes;
255 - acct->packets += packets;
258 - diff->bytes = bytes;
259 - diff->packets = packets;
265 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
267 bool accounting = eth->soc->has_accounting;
268 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h
269 +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
270 @@ -304,6 +304,8 @@ struct mtk_flow_entry {
271 struct mtk_foe_entry data;
272 struct rhash_head node;
273 unsigned long cookie;
274 + u64 prev_packets, prev_bytes;
275 + u64 packets, bytes;
278 struct mtk_mib_entry {
279 @@ -348,6 +350,7 @@ void mtk_ppe_deinit(struct mtk_eth *eth)
280 void mtk_ppe_start(struct mtk_ppe *ppe);
281 int mtk_ppe_stop(struct mtk_ppe *ppe);
282 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
283 +struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);
285 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
287 @@ -397,9 +400,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
289 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
290 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
291 -int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
292 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
293 -struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
294 - struct mtk_foe_accounting *diff);
295 +void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
299 --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
300 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
301 @@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
302 if (bind && state != MTK_FOE_STATE_BIND)
305 - acct = mtk_foe_entry_get_mib(ppe, i, NULL);
306 + acct = mtk_ppe_mib_entry_read(ppe, i);
308 type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
309 seq_printf(m, "%05x %s %7s", i,
310 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
311 +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
312 @@ -505,24 +505,21 @@ static int
313 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
315 struct mtk_flow_entry *entry;
316 - struct mtk_foe_accounting diff;
318 + u64 packets, bytes;
321 entry = rhashtable_lookup(ð->flow_table, &f->cookie,
326 - idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
327 + packets = entry->packets;
328 + bytes = entry->bytes;
329 + mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle);
330 + f->stats.pkts += entry->packets - packets;
331 + f->stats.bytes += entry->bytes - bytes;
332 f->stats.lastused = jiffies - idle * HZ;
334 - if (entry->hash != 0xFFFF &&
335 - mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
337 - f->stats.pkts += diff.packets;
338 - f->stats.bytes += diff.bytes;