static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
int i;
memset(info, 0, sizeof(*info));
- info->num_tc = 1;
-
if (IS_PF(cdev)) {
int max_vf_vlan_filters = 0;
int max_vf_mac_filters = 0;
+ info->num_tc = p_hwfn->hw_info.num_hw_tc;
+
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
u16 num_queues = 0;
} else {
u16 total_cids = 0;
+ info->num_tc = 1;
+
/* Determine queues & XDP support */
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
rc = qed_eth_tx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- p_params, 0,
+ p_params, p_params->tc,
pbl_addr, pbl_size, ret_params);
if (rc) {
params->eth_pf_params.num_arfs_filters = 0;
/* In case we might support RDMA, don't allow qede to be greedy
- * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+ * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
+ * per hwfn.
*/
if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
u16 *num_cons;
num_cons = &params->eth_pf_params.num_cons;
- *num_cons = min_t(u16, *num_cons, 192);
+ *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
}
for (i = 0; i < cdev->num_hwfns; i++) {
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \
+ ((idx) % QEDE_TSS_COUNT(edev)))
+#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev))
+#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
+ (txq)->cos) + (txq)->index)
+#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
+ (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
+ [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
+#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
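/* Editorial worked example, not part of the patch: assuming a device with
 * QEDE_TSS_COUNT(edev) == 8 TX rings and edev->dev_info.num_tc == 4, a txq
 * with index 5 and cos 2 maps to ndev_txq_id = 8 * 2 + 5 = 21 via
 * QEDE_TXQ_TO_NDEV_TXQ_ID(). In the reverse direction,
 * QEDE_NDEV_TXQ_ID_TO_FP_ID() resolves id 21 to fastpath fp_num_rx + (21 % 8)
 * and QEDE_NDEV_TXQ_ID_TO_TXQ_COS() to CoS 21 / 8 = 2, so
 * QEDE_NDEV_TXQ_ID_TO_TXQ() lands back on the same qede_tx_queue.
 */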
/* Regular Tx requires skb + metadata for release purpose,
* while XDP requires the pages and the mapped address.
/* Slowpath; Should be kept in end [unless missing padding] */
void *handle;
+ u16 cos;
+ u16 ndev_txq_id;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
#define QEDE_RX_HDR_SIZE 256
#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
+#define for_each_cos_in_txq(edev, var) \
+ for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
#endif /* _QEDE_H_ */
QEDE_TXQ_XDP_TO_IDX(edev, txq),
qede_tqstats_arr[i].string);
else
- sprintf(*buf, "%d: %s", txq->index,
+ sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
qede_tqstats_arr[i].string);
*buf += ETH_GSTRING_LEN;
}
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_strings_stats_txq(edev, fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_strings_stats_txq(edev,
+ &fp->txq[cos], &buf);
+ }
}
/* Account for non-queue statistics */
if (fp->type & QEDE_FASTPATH_XDP)
qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_get_ethtool_stats_txq(fp->txq, &buf);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
+ }
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
num_stats--;
/* Account for the Regular Tx statistics */
- num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
+ edev->dev_info.num_tc;
/* Account for the Regular Rx statistics */
num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
}
for_each_queue(i) {
+ struct qede_tx_queue *txq;
+
fp = &edev->fp_array[i];
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
if (fp->type & QEDE_FASTPATH_TX) {
- tx_handle = fp->txq->handle;
+ txq = QEDE_FP_TC0_TXQ(fp);
+ tx_handle = txq->handle;
break;
}
}
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ struct qede_tx_queue *txq;
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq suffices.
+ */
+ txq = QEDE_FP_TC0_TXQ(fp);
+
rc = edev->ops->common->set_coalesce(edev->cdev,
0, txc,
- fp->txq->handle);
+ txq->handle);
if (rc) {
DP_INFO(edev,
"Set TX coalesce error, rc = %d\n", rc);
u16 val;
for_each_queue(i) {
- if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
- txq = edev->fp_array[i].txq;
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ txq = QEDE_FP_TC0_TXQ(fp);
break;
}
}
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
struct netdev_queue *netdev_txq;
u16 hw_bd_cons;
- unsigned int pkts_compl = 0, bytes_compl = 0;
int rc;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
if (qede_txq_has_work(fp->xdp_tx))
return true;
- if (likely(fp->type & QEDE_FASTPATH_TX))
- if (qede_txq_has_work(fp->txq))
- return true;
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ return true;
+ }
+ }
return false;
}
struct qede_dev *edev = fp->edev;
int rx_work_done = 0;
- if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
- qede_tx_int(edev, fp->txq);
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ qede_tx_int(edev, &fp->txq[cos]);
+ }
+ }
if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
qede_xdp_tx_int(edev, fp->xdp_tx);
/* Get tx-queue context and netdev index */
txq_index = skb_get_queue_mapping(skb);
- WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
- txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
+ txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
return 0;
}
+int qede_setup_tc(struct net_device *ndev, u8 num_tc)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ int cos, count, offset;
+
+ if (num_tc > edev->dev_info.num_tc)
+ return -EINVAL;
+
+ netdev_reset_tc(ndev);
+ netdev_set_num_tc(ndev, num_tc);
+
+ for_each_cos_in_txq(edev, cos) {
+ count = QEDE_TSS_COUNT(edev);
+ offset = cos * QEDE_TSS_COUNT(edev);
+ netdev_set_tc_queue(ndev, cos, count, offset);
+ }
+
+ return 0;
+}
+
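/* Editorial sketch of the resulting layout, assuming QEDE_TSS_COUNT(edev)
 * is 8 and user space requests num_tc = 2: netdev_set_tc_queue() maps
 * TC 0 to ndev TX queues 0..7 (count 8, offset 0) and TC 1 to queues
 * 8..15 (count 8, offset 8), consistent with the cos * QEDE_TSS_COUNT(edev)
 * offsets produced by QEDE_TXQ_TO_NDEV_TXQ_ID().
 */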
+static int
+qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tc_mqprio_qopt *mqprio;
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ mqprio = type_data;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ return qede_setup_tc(dev, mqprio->num_tc);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
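/* Editorial usage note: with the ndo below wired up, the offload would
 * typically be exercised from user space through the mqprio qdisc, e.g.
 * something like
 *   tc qdisc add dev <ifname> root mqprio num_tc 4 map 0 1 2 3 0 1 2 3 hw 1
 * which reaches qede_setup_tc() via the TC_SETUP_QDISC_MQPRIO case above.
 */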
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
+ .ndo_setup_tc = qede_setup_tc_offload,
};
static const struct net_device_ops qede_netdev_vf_ops = {
struct qede_dev *edev;
ndev = alloc_etherdev_mqs(sizeof(*edev),
- info->num_queues, info->num_queues);
+ info->num_queues * info->num_tc,
+ info->num_queues);
if (!ndev) {
pr_err("etherdev allocation failed\n");
return NULL;
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+ fp->txq = kcalloc(edev->dev_info.num_tc,
+ sizeof(*fp->txq), GFP_KERNEL);
if (!fp->txq)
goto err;
}
static void qede_update_pf_params(struct qed_dev *cdev)
{
struct qed_pf_params pf_params;
+ u16 num_cons;
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
- pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+
+ /* 1 rx + 1 xdp + max tx cos */
+ num_cons = QED_MIN_L2_CONS;
+
+ pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
/* Same for VFs - make sure they'll have sufficient connections
* to support XDP Tx queues.
if (fp->type & QEDE_FASTPATH_XDP)
qede_free_mem_txq(edev, fp->xdp_tx);
- if (fp->type & QEDE_FASTPATH_TX)
- qede_free_mem_txq(edev, fp->txq);
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_free_mem_txq(edev, &fp->txq[cos]);
+ }
}
/* This function allocates all memory needed for a single fp (i.e. an entity
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_alloc_mem_txq(edev, fp->txq);
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
+ if (rc)
+ goto out;
+ }
}
out:
}
if (fp->type & QEDE_FASTPATH_TX) {
- fp->txq->index = txq_index++;
- if (edev->dev_info.is_legacy)
- fp->txq->is_legacy = 1;
- fp->txq->dev = &edev->pdev->dev;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_tx_queue *txq = &fp->txq[cos];
+ u16 ndev_tx_id;
+
+ txq->cos = cos;
+ txq->index = txq_index;
+ ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
+ txq->ndev_txq_id = ndev_tx_id;
+
+ if (edev->dev_info.is_legacy)
+ txq->is_legacy = 1;
+ txq->dev = &edev->pdev->dev;
+ }
+
+ txq_index++;
}
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
{
int rc = 0;
- rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
+ rc = netif_set_real_num_tx_queues(edev->ndev,
+ QEDE_TSS_COUNT(edev) *
+ edev->dev_info.num_tc);
if (rc) {
DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
return rc;
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_drain_txq(edev, fp->txq, true);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_drain_txq(edev, &fp->txq[cos], true);
+ if (rc)
+ return rc;
+ }
}
if (fp->type & QEDE_FASTPATH_XDP) {
/* Stop the Tx Queue(s) */
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_stop_txq(edev, fp->txq, i);
- if (rc)
- return rc;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_stop_txq(edev, &fp->txq[cos], i);
+ if (rc)
+ return rc;
+ }
}
/* Stop the Rx Queue */
params.p_sb = fp->sb_info;
params.sb_idx = sb_idx;
+ params.tc = txq->cos;
rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
page_cnt, &ret_params);
}
if (fp->type & QEDE_FASTPATH_TX) {
- rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
- if (rc)
- goto out;
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
+ TX_PI(cos));
+ if (rc)
+ goto out;
+ }
}
}
bool is_locked)
{
struct qed_link_params link_params;
+ u8 num_tc;
int rc;
DP_INFO(edev, "Starting qede load\n");
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+ num_tc = netdev_get_num_tc(edev->ndev);
+ num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
+ qede_setup_tc(edev->ndev, num_tc);
+
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
{
struct netdev_queue *netdev_txq;
- netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
if (netif_xmit_stopped(netdev_txq))
return true;
for_each_queue(i) {
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
- if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+ struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
+
+ if (txq->sw_tx_cons != txq->sw_tx_prod)
etlv->txqs_empty = false;
- if (qede_is_txq_full(edev, fp->txq))
+ if (qede_is_txq_full(edev, txq))
etlv->num_txqs_full++;
}
if (fp->type & QEDE_FASTPATH_RX) {
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
+/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
+#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
+#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
+
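/* Editorial worked example: assuming the firmware HSI defines
 * NUM_PHYS_TCS_4PORT_K2 as 4, QED_MIN_L2_CONS works out to
 * 1 rx + 1 xdp + 4 tx-cos = 6 connections per queue, and QED_MAX_L2_CONS to
 * 64 * 6 = 384 L2 connections per hwfn, matching the min_t() clamp applied
 * earlier in the patch and the per-SB multiplier in qede_update_pf_params().
 */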
struct qed_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
struct qed_sb_info *p_sb;
u8 sb_idx;
+
+ u8 tc;
};
struct qed_rxq_start_ret_params {