}
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
- u32 l234info, u32 bd_base_info)
+ u32 l234info, u32 bd_base_info, u32 ol_info)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int l3_type, l4_type;
int ol4_type;
return;
}
- ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
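Why the one-word change above matters: the RX descriptor splits the NIC's parse results across two 32-bit words, l234_info for the inner headers and ol_info for the outer (tunnel) headers. Before the patch, the OL4 id was extracted from l234info, so for encapsulated traffic (e.g. MAC-in-UDP) the driver read whatever happened to sit at those bits of the inner-header word. Below is a stand-alone sketch of the mask-and-shift extraction; hnae3_get_field() follows this same pattern, but the RXD_* masks and bit positions here are illustrative stand-ins, not the real descriptor layout from hns3_enet.h:

#include <stdint.h>
#include <stdio.h>

/* Same mask-and-shift pattern as the kernel's hnae3_get_field(). */
#define get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

/* Illustrative layout only: assume the inner L4 id sits in
 * l234_info[11:8] and the outer (tunnel) L4 id sits at the same
 * offset in ol_info.
 */
#define RXD_OL4ID_M (0xfu << 8)
#define RXD_OL4ID_S 8

int main(void)
{
	uint32_t l234_info = 6u << 8; /* inner header id, e.g. TCP */
	uint32_t ol_info = 1u << 8;   /* outer header id, e.g. MAC-in-UDP */

	/* Pre-patch bug: extracting the outer L4 id from the wrong word
	 * silently yields the inner header's id instead.
	 */
	printf("buggy ol4_type = %u\n",
	       (unsigned)get_field(l234_info, RXD_OL4ID_M, RXD_OL4ID_S));

	/* Post-patch: the outer L4 id is read from ol_info. */
	printf("fixed ol4_type = %u\n",
	       (unsigned)get_field(ol_info, RXD_OL4ID_M, RXD_OL4ID_S));
	return 0;
}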
static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info,
- u32 bd_base_info)
+ u32 bd_base_info, u32 ol_info)
{
u16 gro_count;
u32 l3_type;
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
/* if there is no HW GRO, do not set gro params */
if (!gro_count) {
- hns3_rx_checksum(ring, skb, l234info, bd_base_info);
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
return 0;
}
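The middle layer's only job in this hunk is plumbing: when the descriptor reports zero coalesced segments, hand the frame to plain per-packet checksum handling, now forwarding ol_info along with the other parse words. A compilable stand-alone sketch of that fallback shape; every sketch_* name and field is an invented stand-in for the kernel types, not driver code:

#include <stdint.h>
#include <stdbool.h>

/* Stand-in for the bits of struct sk_buff this path touches. */
struct sketch_skb {
	bool csum_ok;      /* stands in for CHECKSUM_UNNECESSARY */
	uint16_t gso_segs; /* stands in for NAPI_GRO_CB(skb)->count */
};

static void sketch_rx_checksum(struct sketch_skb *skb, uint32_t l234info,
			       uint32_t bd_base_info, uint32_t ol_info)
{
	(void)l234info; (void)bd_base_info; (void)ol_info;
	skb->csum_ok = true; /* ... checksum verdict from the parse words ... */
}

static int sketch_set_gro_and_checksum(struct sketch_skb *skb,
				       uint32_t l234info,
				       uint32_t bd_base_info,
				       uint32_t ol_info, uint16_t gro_count)
{
	/* HW coalesced nothing: fall back to plain checksum handling,
	 * forwarding every parse word the leaf helper may need.
	 */
	if (!gro_count) {
		sketch_rx_checksum(skb, l234info, bd_base_info, ol_info);
		return 0;
	}

	skb->gso_segs = gro_count; /* ... plus gso_size/gso_type setup ... */
	return 0;
}

int main(void)
{
	struct sketch_skb skb = { 0 };

	/* gro_count of 0 models a frame that HW GRO left untouched. */
	return sketch_set_gro_and_checksum(&skb, 0, 0, 0, 0);
}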
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
enum hns3_pkt_l2t_type l2_frame_type;
- u32 bd_base_info, l234info;
+ u32 bd_base_info, l234info, ol_info;
struct hns3_desc *desc;
unsigned int len;
int pre_ntc, ret;
desc = &ring->desc[pre_ntc];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
l234info = le32_to_cpu(desc->rx.l234_info);
+ ol_info = le32_to_cpu(desc->rx.ol_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
*/
skb->protocol = eth_type_trans(skb, netdev);
/* This is needed in order to enable forwarding support */
- ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
+ ret = hns3_set_gro_and_checksum(ring, skb, l234info,
+ bd_base_info, ol_info);
if (unlikely(ret)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.rx_err_cnt++;
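Note that all three parse words, bd_base_info, l234_info, and now ol_info, are byte-swapped from the descriptor exactly once, at the top of the BD handler, so every helper further down receives CPU-order u32 values. A minimal sketch of what le32_to_cpu() guarantees, written without kernel headers; the function name and example value are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian 32-bit load: reading byte by byte yields the
 * same host-order result on both little- and big-endian CPUs.
 */
static inline uint32_t sketch_le32_to_cpu(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* Bytes as a LE device would DMA them for the value 0x00000108. */
	const uint8_t rx_desc_word[4] = { 0x08, 0x01, 0x00, 0x00 };

	printf("ol_info = 0x%08x\n",
	       (unsigned)sketch_le32_to_cpu(rx_desc_word));
	return 0;
}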