return err;
}
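+/* Push the driver's current RX/TX pause settings to CGX via an AF mbox request */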
+int otx2_config_pause_frm(struct otx2_nic *pfvf)
+{
+ struct cgx_pause_frm_cfg *req;
+ int err;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return -ENOMEM;
+ }
+
+ req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
+ req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
+ req->set = 1; /* set = 1 configures; set = 0 (default) reads back */
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ otx2_mbox_unlock(&pfvf->mbox);
+ return err;
+}
+
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
* RED accepts pkts if free pointers > 102 & <= 205.
* Drops pkts if free pointers < 102.
*/
+#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
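+/* Levels are on a 0-255 scale, e.g. 85% full => 255 - (85 * 256) / 100 = 38 */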
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
if (qidx < pfvf->hw.rx_queues) {
aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
aq->cq.drop_ena = 1;
+
+ /* Enable receive CQ backpressure */
+ aq->cq.bp_ena = 1;
+ aq->cq.bpid = pfvf->bpid[0];
+
+ /* Set the backpressure level to the same value as the CQ pass level */
+ aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
}
aq->aura.fc_addr = pool->fc_addr->iova;
aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+ /* Enable backpressure for RQ aura */
+ if (aura_id < pfvf->hw.rqpool_cnt) {
+ aq->aura.bp_ena = 1; /* bit 0 enables BP towards NIX0_RX */
+ aq->aura.nix0_bpid = pfvf->bpid[0];
+ /* Set backpressure level for RQ's Aura */
+ aq->aura.bp = RQ_BP_LVL_AURA;
+ }
+
/* Fill AQ info */
aq->ctype = NPA_AQ_CTYPE_AURA;
aq->op = NPA_AQ_INSTOP_INIT;
otx2_mbox_unlock(mbox);
}
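+/* Ask the AF to enable or disable NIX backpressure on our RX channel(s).
+ * Unlike otx2_config_pause_frm(), this does not take the mbox lock itself;
+ * the teardown path below wraps it in otx2_mbox_lock().
+ */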
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
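+ /* Single RX channel (base 0, count 1); bpid_per_chan = 0 requests
+ * one common bpid for the whole range.
+ */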
+ req->chan_base = 0;
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ int chan, chan_id;
+
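+ /* Each chan_bpid[] entry packs the channel id in bits [16:10]
+ * and the bpid in bits [9:0].
+ */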
+ for (chan = 0; chan < rsp->chan_cnt; chan++) {
+ chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
+ pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
+ }
+}
+
void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
struct otx2_qset *qset = &pfvf->qset;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
+#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
struct otx2_qset qset;
struct workqueue_struct *mbox_wq;
u16 pcifunc; /* RVU PF_FUNC */
+ u16 bpid[NIX_MAX_BPID_CHAN];
struct cgx_link_user_info linfo;
u64 reset_count;
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
+int otx2_config_pause_frm(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
gfp_t gfp);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
+int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
+ struct nix_bp_cfg_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
return err;
}
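+/* ethtool -a handler: read the current pause config back from CGX */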
+static void otx2_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_pause_frm_cfg *req, *rsp;
+
+ otx2_mbox_lock(&pfvf->mbox);
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
+ if (!req) {
+ otx2_mbox_unlock(&pfvf->mbox);
+ return;
+ }
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pause_frm_cfg *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ pause->rx_pause = rsp->rx_pause;
+ pause->tx_pause = rsp->tx_pause;
+ }
+ otx2_mbox_unlock(&pfvf->mbox);
+}
+
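+/* ethtool -A handler: update the pause flags and push them to CGX */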
+static int otx2_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if (pause->rx_pause)
+ pfvf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+
+ if (pause->tx_pause)
+ pfvf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+
+ return otx2_config_pause_frm(pfvf);
+}
+
static void otx2_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
.set_rxfh = otx2_set_rxfh,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
+ .get_pauseparam = otx2_get_pauseparam,
+ .set_pauseparam = otx2_set_pauseparam,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
mbox_handler_nix_txsch_alloc(pf,
(struct nix_txsch_alloc_rsp *)msg);
break;
+ case MBOX_MSG_NIX_BP_ENABLE:
+ mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
+ break;
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
if (err)
goto err_free_npa_lf;
+ /* Enable backpressure */
+ otx2_nix_config_bp(pf, true);
+
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf);
if (err) {
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+ otx2_mbox_lock(mbox);
+ /* Disable backpressure; only the PF (FUNC bits clear) configures it */
+ if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
+ otx2_nix_config_bp(pf, false);
+ otx2_mbox_unlock(mbox);
+
/* Disable RQs */
otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);