unsigned int estdep;
unsigned int estsel;
unsigned int fpesel;
+ unsigned int tbssel;
};
/* RX Buffer size must be multiple of 4/8/16 bytes */
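The new tbssel capability flag added above is what tc_setup_etf() checks further down before accepting an ETF offload request. As a rough, hedged sketch of where such a flag comes from: the DMA layer's get_hw_feature() callback parses the MAC's hardware-feature registers into struct dma_features. The register offset and bit name below are placeholders, not the driver's actual definitions.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

/* Placeholder names: EXAMPLE_HW_FEATURE_REG and EXAMPLE_HWFEAT_TBSSEL are
 * illustrative only, not the real stmmac register/bit definitions.
 */
#define EXAMPLE_HW_FEATURE_REG		0x128
#define EXAMPLE_HWFEAT_TBSSEL		BIT(27)

static void example_get_hw_feature(void __iomem *ioaddr,
				   struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + EXAMPLE_HW_FEATURE_REG);

	/* Record whether the MAC was synthesized with TBS support */
	dma_cap->tbssel = !!(hw_cap & EXAMPLE_HWFEAT_TBSSEL);
}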
struct tc_cbs_qopt_offload;
struct flow_cls_offload;
struct tc_taprio_qopt_offload;
+struct tc_etf_qopt_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
	int (*setup_cls)(struct stmmac_priv *priv,
			 struct flow_cls_offload *cls);
int (*setup_taprio)(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt);
+ int (*setup_etf)(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
	stmmac_do_callback(__priv, tc, init, __args)
#define stmmac_tc_setup_cls(__priv, __args...) \
	stmmac_do_callback(__priv, tc, setup_cls, __args)
#define stmmac_tc_setup_taprio(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_taprio, __args)
+#define stmmac_tc_setup_etf(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_etf, __args)
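For context on how the wrapper macros above reach the ops table: stmmac_do_callback() dispatches through priv->hw->tc with NULL checks, so stmmac_tc_setup_etf(priv, priv, qopt) ends up calling the setup_etf hook installed in dwmac510_tc_ops below, and fails gracefully when the hook is absent. A simplified sketch of that dispatch pattern (not the exact macro body in hwif.h) looks like this:

/* Simplified sketch of the stmmac_do_callback() dispatch pattern; the real
 * macro lives in hwif.h and may differ in detail.
 */
#define example_do_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw && (__priv)->hw->__module && \
	    (__priv)->hw->__module->__cname) \
		__result = (__priv)->hw->__module->__cname((__arg0), ##__args); \
	__result; \
})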
struct stmmac_counters;
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
case TC_SETUP_QDISC_TAPRIO:
return stmmac_tc_setup_taprio(priv, priv, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return stmmac_tc_setup_etf(priv, priv, type_data);
default:
return -EOPNOTSUPP;
}
}
+static int tc_setup_etf(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt)
+{
+ if (!priv->dma_cap.tbssel)
+ return -EOPNOTSUPP;
+ if (qopt->queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+ if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ return -EINVAL;
+
+ if (qopt->enable)
+ priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ else
+ priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+
+ netdev_info(priv->dev, "%s ETF for Queue %d\n",
+ qopt->enable ? "enabled" : "disabled", qopt->queue);
+ return 0;
+}
+
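For reference, the offload request consumed by tc_setup_etf() is the small struct passed down by the ETF qdisc via ndo_setup_tc(TC_SETUP_QDISC_ETF), and STMMAC_TBS_AVAIL/STMMAC_TBS_EN are per-TX-queue flag bits (AVAIL set when the ring was allocated with TBS descriptors, EN toggled here). The struct layout follows include/net/pkt_cls.h; the flag bit positions shown are illustrative.

/* Offload request handed down by the ETF qdisc (include/net/pkt_cls.h) */
struct tc_etf_qopt_offload {
	u8 enable;	/* install (true) or remove (false) the offload */
	s32 queue;	/* TX queue index the qdisc is attached to */
};

/* Per-queue TBS flags as used above; bit positions are illustrative */
#define STMMAC_TBS_AVAIL	BIT(0)	/* queue has TBS-capable descriptors */
#define STMMAC_TBS_EN		BIT(1)	/* ETF offload currently enabled */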
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
.setup_cbs = tc_setup_cbs,
.setup_cls = tc_setup_cls,
.setup_taprio = tc_setup_taprio,
+ .setup_etf = tc_setup_etf,
};