static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bp->cp_nr_rings;
+ int cp = bnxt_cp_rings_in_use(bp);
int rx = bp->rx_nr_rings;
int vnic = 1, grp = rx;
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int cp = bnxt_cp_rings_in_use(bp);
int tx = bp->tx_nr_rings;
int rx = bp->rx_nr_rings;
- int cp = bp->cp_nr_rings;
int grp, rx_rings, rc;
bool sh = false;
int vnic = 1;
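
Both bnxt_need_reserve_rings() and __bnxt_reserve_rings() now size the completion-ring requirement with bnxt_cp_rings_in_use() instead of the bare bp->cp_nr_rings. The helper itself is not shown in this excerpt; given the rest of the patch, it presumably folds the MSI-X vectors already granted to the RDMA ULP into the count, so ring reservation accounts for them. The sketch below is only a simplified model under that assumption (names are illustrative, not driver code):

/* Simplified model, not the driver's helper: completion rings in use
 * are the L2 rings plus whatever MSI-X vectors have already been
 * handed to the ULP.
 */
static int cp_rings_in_use_model(int l2_cp_rings, int ulp_msix)
{
        return l2_cp_rings + ulp_msix;
}
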
bp->hw_resc.max_cp_rings = max;
}
-static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
bp->hw_resc.max_irqs = max_irqs;
}
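+/* Return the number of MSI-X vectors that can be handed to a new
+ * requester (the RDMA ULP) on top of what the L2 completion rings
+ * already use.  Without resource-manager firmware (NEW_RM) this is
+ * just the headroom within the vectors currently enabled; with
+ * NEW_RM, additional vectors can still be reserved, up to the
+ * function's max IRQ limit.
+ */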
+int bnxt_get_avail_msix(struct bnxt *bp, int num)
+{
+ int max_cp = bnxt_get_max_func_cp_rings(bp);
+ int max_irq = bnxt_get_max_func_irqs(bp);
+ int total_req = bp->cp_nr_rings + num;
+ int max_idx, avail_msix;
+
+ max_idx = min_t(int, bp->total_irqs, max_cp);
+ avail_msix = max_idx - bp->cp_nr_rings;
+ if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+ return avail_msix;
+
+ if (max_irq < total_req) {
+ num = max_irq - bp->cp_nr_rings;
+ if (num <= 0)
+ return 0;
+ }
+ return num;
+}
+
static int bnxt_get_num_msix(struct bnxt *bp)
{
if (!(bp->flags & BNXT_FLAG_NEW_RM))
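
A worked example of the availability math in bnxt_get_avail_msix(), recast as a standalone userspace model; the function and the numbers in main() are made up for illustration and are not part of the driver:

#include <stdio.h>

/* Standalone model of bnxt_get_avail_msix(): "new_rm" stands in for
 * the BNXT_FLAG_NEW_RM check, everything else is a plain integer.
 */
static int avail_msix_model(int total_irqs, int max_cp, int max_irq,
                            int cp_nr_rings, int num, int new_rm)
{
        int max_idx = total_irqs < max_cp ? total_irqs : max_cp;
        int headroom = max_idx - cp_nr_rings;

        if (!new_rm || headroom >= num)
                return headroom;
        if (max_irq < cp_nr_rings + num) {      /* cap at the IRQ limit */
                num = max_irq - cp_nr_rings;
                if (num <= 0)
                        return 0;
        }
        return num;
}

int main(void)
{
        /* 16 vectors enabled, 8 used by the L2 rings, ULP asks for 9:
         * only 8 are free right now, but NEW_RM can reserve more as
         * long as max_irq (24 here) allows it.
         */
        printf("%d\n", avail_msix_model(16, 32, 24, 8, 9, 0));  /* 8 */
        printf("%d\n", avail_msix_model(16, 32, 24, 8, 9, 1));  /* 9 */
        return 0;
}
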
static int bnxt_init_msix(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
struct msix_entry *msix_ent;
total_vecs = bnxt_get_num_msix(bp);
min = 2;
total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
- if (total_vecs < 0) {
+ ulp_msix = bnxt_get_ulp_msix_num(bp);
+ if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
- total_vecs, min == 1);
+ total_vecs - ulp_msix, min == 1);
if (rc)
goto msix_setup_exit;
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
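
bnxt_init_msix() now fails outright when pci_enable_msix_range() grants fewer vectors than the ULP alone needs, and it trims the L2 rings against total_vecs minus the ULP's share rather than the full allocation. A minimal sketch of that budget split (illustrative only, not a driver function):

/* Minimal sketch: the ULP's vectors come off the top before the L2
 * rx/tx rings are trimmed to fit.  Returns the budget handed to the
 * ring-trimming step, or -1 on the new failure case.
 */
static int l2_vector_budget(int total_vecs, int ulp_msix)
{
        if (total_vecs < 0 || total_vecs < ulp_msix)
                return -1;              /* maps to -ENODEV in the driver */
        return total_vecs - ulp_msix;   /* what bnxt_trim_rings() works with */
}
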
-static int bnxt_reserve_rings(struct bnxt *bp)
+int bnxt_reserve_rings(struct bnxt *bp)
{
- int orig_cp = bp->hw_resc.resv_cp_rings;
int tcs = netdev_get_num_tc(bp->dev);
int rc;
netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
return rc;
}
- if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
if (rc)
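
The old condition only re-initialized the interrupt mode when bp->cp_nr_rings outgrew the previously reserved completion rings; the new one fires on any mismatch between the vectors the driver now wants (bnxt_get_num_msix(), which includes the ULP's share) and the vectors currently enabled, so shrinking triggers a re-init as well. A condensed sketch of the decision, assuming NEW_RM firmware:

/* Condensed sketch (assumes BNXT_FLAG_NEW_RM): re-enable MSI-X from
 * scratch whenever the desired vector count drifts away from what
 * pci_enable_msix_range() originally granted.
 */
static int need_irq_reinit(int wanted_msix, int total_irqs)
{
        return wanted_msix != total_irqs;
}
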
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
+int bnxt_get_avail_msix(struct bnxt *bp, int num);
+int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
struct bnxt *bp = netdev_priv(dev);
int max_idx, max_cp_rings;
int avail_msix, i, idx;
+ int rc = 0;
ASSERT_RTNL();
if (ulp_id != BNXT_ROCE_ULP)
return -EAGAIN;
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- avail_msix = max_idx - bp->cp_nr_rings;
+ avail_msix = bnxt_get_avail_msix(bp, num_msix);
if (!avail_msix)
return -ENOMEM;
if (avail_msix > num_msix)
avail_msix = num_msix;
- if (bp->flags & BNXT_FLAG_NEW_RM)
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
idx = bp->cp_nr_rings;
- else
+ } else {
+ max_idx = min_t(int, bp->total_irqs, max_cp_rings);
idx = max_idx - avail_msix;
+ }
edev->ulp_tbl[ulp_id].msix_base = idx;
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
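+ /* Not enough vectors are currently enabled to cover the ULP's
+ * block; reload the NIC (or just re-reserve rings if it is down)
+ * so the extra vectors get reserved and enabled.
+ */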
+ if (bp->total_irqs < (idx + avail_msix)) {
+ if (netif_running(dev)) {
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
+ } else {
+ rc = bnxt_reserve_rings(bp);
+ }
+ }
+ if (rc) {
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+ return -EAGAIN;
+ }
+
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ }
for (i = 0; i < avail_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
ent[i].db_offset = (idx + i) * 0x80;
}
- bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
+ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
return avail_msix;
}
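
Taken together, bnxt_req_msix_vecs() on the ULP side now records the ULP's base index and vector count up front, reloads the NIC (or just re-reserves rings while it is down) when the currently enabled vectors cannot cover the request, reports what firmware actually reserved under NEW_RM instead of the initial estimate, and only then deducts the grant from the function's IRQ and completion-ring limits. The sketch below models that grant flow with plain integers; it is not the driver's API, and "reload_ok" stands in for the close/open or bnxt_reserve_rings() outcome:

/* Standalone model of the grant flow above (NEW_RM layout, where the
 * ULP block starts right after the L2 completion rings).
 */
static int grant_ulp_msix(int num_msix, int avail, int cp_nr_rings,
                          int total_irqs, int resv_cp_rings, int new_rm,
                          int reload_ok, int *base)
{
        if (!avail)
                return -1;                      /* -ENOMEM in the driver */
        if (avail > num_msix)
                avail = num_msix;
        *base = cp_nr_rings;
        if (total_irqs < *base + avail && !reload_ok)
                return -2;                      /* -EAGAIN in the driver */
        if (new_rm)                             /* trust what firmware reserved */
                avail = resv_cp_rings - cp_nr_rings;
        return avail;                           /* vectors handed to the ULP */
}
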
msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
- bnxt_set_max_func_irqs(bp, bp->total_irqs);
+ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
+ if (netif_running(dev)) {
+ bnxt_close_nic(bp, true, false);
+ bnxt_open_nic(bp, true, false);
+ }
return 0;
}
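
bnxt_free_msix_vecs() is now the mirror image: the freed vectors are added back to the function's IRQ limit (instead of resetting it to bp->total_irqs) as well as to the completion-ring limit, and a close/open cycle lets the L2 rings grow back into them. Illustrative arithmetic only, not driver code:

/* Give-back accounting: if granting 2 of 16 vectors left max_irqs at
 * 14, freeing them restores 14 + 2 = 16, matching the deduction made
 * in bnxt_req_msix_vecs().
 */
static int restore_max_irqs(int cur_max_irqs, int msix_requested)
{
        return cur_max_irqs + msix_requested;
}
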