#include "hnae3.h"
#include "hns3_enet.h"
-static
-int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
return -EOPNOTSUPP;
}
-static
-int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
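+	/* make sure the irq name is null terminated */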
tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
- tqp_vectors->name,
- tqp_vectors);
+ tqp_vectors->name, tqp_vectors);
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
tqp_vectors->vector_irq);
ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) {
netdev_err(netdev,
- "netif_set_real_num_tx_queues fail, ret=%d!\n",
- ret);
+ "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
return ret;
}
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
- netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
+ netdev_err(netdev, "init irq failed! ret=%d\n", ret);
goto free_rmap;
}
ret = hns3_nic_net_up(netdev);
if (ret) {
- netdev_err(netdev,
- "hns net up fail, ret=%d!\n", ret);
+ netdev_err(netdev, "net up fail, ret=%d!\n", ret);
return ret;
}
kinfo = &h->kinfo;
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- netdev_set_prio_tc_map(netdev, i,
- kinfo->prio_tc[i]);
- }
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
if (l3.v4->version == 4)
l3.v4->check = 0;
- /* tunnel packet.*/
+ /* tunnel packet */
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_UDP_TUNNEL |
l3.v4->check = 0;
}
- /* normal or tunnel packet*/
+ /* normal or tunnel packet */
l4_offset = l4.hdr - skb->data;
hdr_len = (l4.tcp->doff << 2) + l4_offset;
- /* remove payload length from inner pseudo checksum when tso*/
+ /* remove payload length from inner pseudo checksum when tso */
l4_paylen = skb->len - l4_offset;
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(l4_paylen));
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
il2_hdr = skb_inner_mac_header(skb);
- /* compute OL4 header size, defined in 4 Bytes. */
+ /* compute OL4 header size, defined in 4 Bytes */
l4_len = il2_hdr - l4.hdr;
hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len =
- cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
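+	/* only the first BD of the skb is typed as SKB, the rest as PAGE */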
desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
- DESC_TYPE_SKB : DESC_TYPE_PAGE;
+ DESC_TYPE_SKB : DESC_TYPE_PAGE;
/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
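+	/* the last BD carries the remaining bytes, the others a full BD size */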
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
- (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+ (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
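+	/* the frame end flag is set only on the last BD of the last fragment */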
hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
frag_end && (k == frag_buf_num - 1) ?
1 : 0);
desc->tx.bdtp_fe_sc_vld_ra_ri =
cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
- /* move ring pointer to next.*/
+ /* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
desc_cb = &ring->desc_cb[ring->next_to_use];
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
- qos, vlan_proto);
+ qos, vlan_proto);
return ret;
}
struct hnae3_ae_dev *ae_dev;
int ret;
- ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
- GFP_KERNEL);
+ ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
if (!ae_dev) {
ret = -ENOMEM;
return ret;
return ret;
}
-/* detach a in-used buffer and replace with a reserved one */
+/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
struct hns3_desc_cb *res_cb)
{
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
- + ring->desc_cb[i].page_offset);
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
}
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
-static void
-hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
+static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+ int cleand_count)
{
struct hns3_desc_cb *desc_cb;
struct hns3_desc_cb res_cbs;
if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1;
- /* Bump ref count on page before it is given*/
+ /* Bump ref count on page before it is given */
get_page(desc_cb->priv);
} else if (page_count(desc_cb->priv) == 1) {
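+		/* the driver holds the only reference, so the page can be reused */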
desc_cb->reuse_flag = 1;
*/
if (pending) {
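+		/* step back to the previous BD, wrapping around the ring if needed */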
pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
- ring->desc_num;
+ ring->desc_num;
pre_desc = &ring->desc[pre_bd];
bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
} else {
HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
- l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
- HNS3_RXD_L3ID_S);
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6)
return 0;
}
-int hns3_clean_rx_ring(
- struct hns3_enet_ring *ring, int budget,
- void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
out:
/* Make all data has been write before submit */
if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
+ hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
return recv_pkts;
}
if (!vector)
return -ENOMEM;
+ /* save the actual available vector number */
vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
priv->vector_num = vector_num;
struct hnae3_queue *q = ring->tqp;
if (!HNAE3_IS_TX_RING(ring)) {
- hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
- (u32)dma);
+ hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
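+		/* split the shift, a plain '>> 32' is undefined for a 32-bit dma_addr_t */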
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
(u32)((dma >> 31) >> 1));
ret);
return ret;
}
- hns3_replace_buffer(ring, ring->next_to_use,
- &res_cbs);
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
}
ring_ptr_move_fw(ring, next_to_use);
}
if (ret) {
set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
netdev_err(kinfo->netdev,
- "hns net up fail, ret=%d!\n", ret);
+ "net up fail, ret=%d!\n", ret);
return ret;
}
}
packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
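+	/* build the Ethernet header of the loopback test packet */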
memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+	/* The dst mac addr of loopback packet is the same as the host's
+	 * mac addr, the SSU component may loop back the packet to host
+	 * before the packet reaches mac or serdes, which will defeat
+	 * the purpose of mac or serdes selftest.
+	 */
ethh->h_dest[5] += 0x1f;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
if (ret)
goto err_tm_pg_cmd_send;
- dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
+ dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
goto err_tm_pg_cmd_send;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
- dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n",
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
bp_to_qs_map_cmd->tc_id);
- dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n",
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map);
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
- dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT;
enum hclge_err_int_type int_type)
{
struct device *dev = &hdev->pdev->dev;
- int num = 1;
+ int desc_num = 1;
int ret;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
if (flag) {
desc[0].flag |= cpu_to_le16(flag);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
- num = 2;
+ desc_num = 2;
}
if (w_num)
desc[0].data[w_num] = cpu_to_le32(int_type);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
dev_err(dev, "query error cmd failed (%d)\n", ret);
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
- int num = 1;
+ int desc_num = 1;
int ret;
/* configure PPU error interrupts */
desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
- num = 2;
+ desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
return -EINVAL;
}
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
return ret;
}
int ret;
/* read overflow error status */
- ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_ROCEE_PF_RAS_INT_CMD,
+ ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
0, 0, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_RX_STATUS,
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
true);
desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
if (ret) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
+ ret, i);
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ /* each tqp has TX & RX two queues */
return kinfo->num_tqps * (2);
}
return count;
}
-static void hclge_get_strings(struct hnae3_handle *handle,
- u32 stringset,
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
u8 *p = (char *)data;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset,
- g_mac_stats_string,
- size,
- p);
+ p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_APP],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
- memcpy(p,
- hns3_nic_test_strs[HNAE3_LOOP_PHY],
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
- g_mac_stats_string,
- ARRAY_SIZE(g_mac_stats_string),
- data);
+ p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "query function status failed %d.\n",
- ret);
-
+ "query function status failed %d.\n", ret);
return ret;
}
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msi +
+ hdev->num_msi = hdev->num_roce_msi +
hdev->roce_base_msix_offset;
} else {
hdev->num_msi =
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
-
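+	/* only 10M and 100M links can run at half duplex */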
if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
duplex = HCLGE_MAC_FULL;
struct hnae3_client *client = hdev->nic_client;
u16 i;
- if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) ||
- !client)
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
return 0;
if (!client->ops->reset_notify)
int ret = 0;
u16 i;
- if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) ||
- !client)
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
return 0;
if (!client->ops->reset_notify)
return 0;
}
-static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
- int vector,
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}
-static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
- int vector,
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
if (ret)
dev_err(&handle->pdev->dev,
"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
- vector_id,
- ret);
+ vector_id, ret);
return ret;
}
if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev,
- "Delete fail, rule %d is inexistent\n",
- fs->location);
+ "Delete fail, rule %d is inexistent\n", fs->location);
return -ENOENT;
}
- ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
- fs->location, NULL, false);
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
+ NULL, false);
if (ret)
return ret;
is_multicast_ether_addr(addr)) {
dev_err(&hdev->pdev->dev,
"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr,
- is_zero_ether_addr(addr),
+ addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev,
- "Remove mac err! invalid mac:%pM.\n",
- addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
+ addr);
return -EINVAL;
}
ret = hclge_init_fd_config(hdev);
if (ret) {
- dev_err(&pdev->dev,
- "fd table init fail, ret=%d\n", ret);
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
return ret;
}
6 * 8, /* Port level */
6 * 256 /* Qset level */
};
- u8 ir_u_calc = 0, ir_s_calc = 0;
+ u8 ir_u_calc = 0;
+ u8 ir_s_calc = 0;
u32 ir_calc;
u32 tick;
trans_gap = pause_param->pause_trans_gap;
trans_time = le16_to_cpu(pause_param->pause_trans_time);
- return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
- trans_time);
+ return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
struct hclge_desc desc;
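+	/* bucket selects the P (peak) or C (committed) shaping opcode */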
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
- HCLGE_OPC_TM_PG_C_SHAPPING;
+ HCLGE_OPC_TM_PG_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false);
shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
struct hclge_desc desc;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
- HCLGE_OPC_TM_PRI_C_SHAPPING;
+ HCLGE_OPC_TM_PRI_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, opcode, false);
max_rss_size = min_t(u16, hdev->rss_size_max,
vport->alloc_tqps / kinfo->num_tc);
+ /* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size ||
(!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ /* Set to the maximum specification value (max_rss_size). */
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
kinfo->rss_size, max_rss_size);
kinfo->rss_size = max_rss_size;
/* pg to prio */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Cfg dwrr */
- ret = hclge_tm_pg_weight_cfg(hdev, i,
- hdev->tm_info.pg_dwrr[i]);
+ ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
if (ret)
return ret;
}
struct hclge_mac *mac = &hdev->hw.mac;
return hclge_pause_param_cfg(hdev, mac->mac_addr,
- HCLGE_DEFAULT_PAUSE_TRANS_GAP,
- HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
- u8 i, bit_map = 0;
+ u8 bit_map = 0;
+ u8 i;
hdev->tm_info.num_tc = num_tc;
HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG};
-static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
- struct hnae3_handle *handle)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic);
if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
&hdev->reset_state)) {
/* PF has initmated that it is about to reset the hardware.
- * We now have to poll & check if harware has actually completed
- * the reset sequence. On hardware reset completion, VF needs to
- * reset the client and ae device.
+ * We now have to poll & check if hardware has actually
+	 * completed the reset sequence. On hardware reset completion,
+ * VF needs to reset the client and ae device.
*/
hdev->reset_attempts = 0;
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
- * 1. reset was initiated due to watchdog timeout due to
+ * 1. reset was initiated due to watchdog timeout caused by
* a. IMP was earlier reset and our TX got choked down and
* which resulted in watchdog reacting and inducing VF
* reset. This also means our cmdq would be unreliable.
}
- /* Initialize RSS indirect table for each vport */
+ /* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
- /* other vlan config(like, VLAN TX/RX offload) would also be added
- * here later
- */
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false);
}
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- /* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
if (hclgevf_reset_tqp(handle, i))
break;
- /* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}