qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
feat_num[QED_VF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE),
- sb_cnt_info.sb_iov_cnt);
+ sb_cnt_info.iov_cnt);
feat_num[QED_PF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_SB) -
non_l2_sbs,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
case QED_SB:
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- *p_resc_num = sb_cnt_info.sb_cnt;
+ *p_resc_num = sb_cnt_info.cnt;
break;
default:
return -EINVAL;
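All of the renames in this patch follow from folding the three loose SB
counters into a single `usage` member. A minimal sketch of the layout the
diff implies; only the four counters actually referenced in this patch are
certain, the surrounding fields are quoted from context:

    /* Counters for a PF's own SBs and for the SBs of its child VFs.
     * Field names are taken from the accesses in this patch.
     */
    struct qed_sb_cnt_info {
            int cnt;           /* SBs owned by the PF itself */
            int iov_cnt;       /* SBs owned by the PF's VFs */
            int free_cnt;      /* PF SBs not yet consumed */
            int free_cnt_iov;  /* VF SBs not yet assigned to a VF */
    };

    struct qed_igu_info {
            /* ... igu_base_sb, igu_base_sb_iov, igu_dsb_id, entry[], ... */
            struct qed_sb_cnt_info usage;  /* replaces igu_sb_cnt,
                                            * igu_sb_cnt_iov and free_blks
                                            */
    };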
bool b_set, bool b_slowpath)
{
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
- u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+ u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->usage.cnt;
u32 igu_sb_id = 0, val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
/* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff;
- p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_base_sb_iov = 0xffff;
/* Distinguish between existent and non-existent default SB */
if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) {
if (p_igu_info->igu_base_sb == 0xffff)
p_igu_info->igu_base_sb = igu_sb_id;
- p_igu_info->igu_sb_cnt++;
+ p_igu_info->usage.cnt++;
}
} else if (!(p_block->is_pf) &&
(p_block->function_id >= min_vf) &&
(p_block->function_id < max_vf)) {
if (p_igu_info->igu_base_sb_iov == 0xffff)
p_igu_info->igu_base_sb_iov = igu_sb_id;
- p_igu_info->free_blks++;
+ p_igu_info->usage.iov_cnt++;
}
/* Mark the First entry belonging to the PF or its VFs
 * as the default SB.
 */
}
/* All non-default SBs are considered free at this point */
- p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+ p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x\n",
p_igu_info->igu_dsb_id,
- p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov);
+ p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
return 0;
}
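To make the end-of-scan accounting concrete: nothing has been consumed when
the CAM scan finishes, so the free counters must equal the totals. An
illustrative invariant check (not in the driver; WARN_ON is used purely for
demonstration), assuming a PF that found 16 own SBs and 48 VF SBs:

    /* Hypothetical post-scan state for a PF with 16 own SBs and 48 VF
     * SBs: usage = { .cnt = 16, .iov_cnt = 48,
     *                .free_cnt = 16, .free_cnt_iov = 48 };
     */
    WARN_ON(p_igu_info->usage.free_cnt != p_igu_info->usage.cnt);
    WARN_ON(p_igu_info->usage.free_cnt_iov != p_igu_info->usage.iov_cnt);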
if (!info || !p_sb_cnt_info)
return;
- p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
- p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
- p_sb_cnt_info->sb_free_blk = info->free_blks;
+ memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
}
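The memcpy() is only correct because, after this patch, the query output and
the cached counters share the same type. Copying the struct by assignment
would express the same operation, arguably more clearly:

    /* Equivalent to the memcpy() above: both sides are (assumed to be)
     * struct qed_sb_cnt_info, so struct assignment copies all four
     * counters in one go.
     */
    *p_sb_cnt_info = info->usage;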
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
/* Determine origin of SB id */
if ((sb_id >= p_info->igu_base_sb) &&
- (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+ (sb_id < p_info->igu_base_sb + p_info->usage.cnt)) {
return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
- (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+ (sb_id < p_info->igu_base_sb_iov + p_info->usage.iov_cnt)) {
/* We want the first VF queue to be adjacent to the
* last PF queue. Since L2 queues can be partial to
* SBs, we'll use the feature instead.
*/
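So PF SBs map onto queue ids 0..usage.cnt-1, and the VF range continues from
the PF's L2 feature count rather than from usage.cnt. A worked example with
illustrative values (the base ids and the feature count below are
assumptions, not taken from this patch):

    /* Assume igu_base_sb = 1, usage.cnt = 16, igu_base_sb_iov = 17 and
     * FEAT_NUM(p_hwfn, QED_PF_L2_QUE) = 16. Then:
     *
     *   sb_id  1..16 -> queue id sb_id - 1        (0..15, PF range)
     *   sb_id 17..   -> queue id sb_id - 17 + 16  (16.., VF range)
     */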
igu_blocks = p_hwfn->hw_info.p_igu_info->entry;
- if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
- num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
- p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
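The three lines above are the reserve half of a simple pool protocol on
free_cnt_iov: clamp the request to what is free, then shrink the pool by what
was granted. The same pattern as a hypothetical helper (a sketch, not a
function in the driver):

    /* Hypothetical helper: grant at most the number of VF SBs still
     * free and reserve them by shrinking the pool.
     */
    static u16 qed_iov_reserve_vf_sbs(struct qed_igu_info *p_info,
                                      u16 requested)
    {
            u16 granted = min_t(u16, requested,
                                p_info->usage.free_cnt_iov);

            p_info->usage.free_cnt_iov -= granted;
            return granted;
    }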
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
qed_wr(p_hwfn, p_ptt, addr, val);
p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
-
- p_hwfn->hw_info.p_igu_info->free_blks++;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
}
vf->num_sbs = 0;
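Teardown is the inverse: each IGU entry is invalidated, marked
QED_IGU_STATUS_FREE and returned to the pool by the increment above, after
which the VF owns no SBs. As a sketch, the release counterpart to the
hypothetical reserve helper would be:

    /* Hypothetical counterpart to qed_iov_reserve_vf_sbs() above. */
    static void qed_iov_release_vf_sbs(struct qed_igu_info *p_info,
                                       u16 count)
    {
            p_info->usage.free_cnt_iov += count;
    }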