return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
}
-static u32 vxge_get_rx_csum(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- return vdev->rx_csum;
-}
-
-static int vxge_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- if (data)
- vdev->rx_csum = 1;
- else
- vdev->rx_csum = 0;
-
- return 0;
-}
-
-static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
- else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-
- return 0;
-}
-
static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
{
struct vxgedev *vdev = netdev_priv(dev);
}
}
-static int vxge_set_flags(struct net_device *dev, u32 data)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- enum vxge_hw_status status;
-
- if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
- return -EINVAL;
-
- if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
- return 0;
-
- if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
- return -EINVAL;
-
- vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
-
- /* Enabling RTH requires some of the logic in vxge_device_register and a
- * vpath reset. Due to these restrictions, only allow modification
- * while the interface is down.
- */
- status = vxge_reset_all_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
- return -EFAULT;
- }
-
- if (vdev->devh->config.rth_en)
- dev->features |= NETIF_F_RXHASH;
- else
- dev->features &= ~NETIF_F_RXHASH;
-
- return 0;
-}
-
static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
{
struct vxgedev *vdev = netdev_priv(dev);
.get_link = ethtool_op_get_link,
.get_pauseparam = vxge_ethtool_getpause_data,
.set_pauseparam = vxge_ethtool_setpause_data,
- .get_rx_csum = vxge_get_rx_csum,
- .set_rx_csum = vxge_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = vxge_ethtool_op_set_tso,
.get_strings = vxge_ethtool_get_strings,
.set_phys_id = vxge_ethtool_idnic,
.get_sset_count = vxge_ethtool_get_sset_count,
.get_ethtool_stats = vxge_get_ethtool_stats,
- .set_flags = vxge_set_flags,
.flash_device = vxge_fw_flash,
};
"%s: %s:%d skb protocol = %d",
ring->ndev->name, __func__, __LINE__, skb->protocol);
- if (ring->gro_enable) {
- if (ring->vlgrp && ext_info->vlan &&
- (ring->vlan_tag_strip ==
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
- vlan_gro_receive(ring->napi_p, ring->vlgrp,
- ext_info->vlan, skb);
- else
- napi_gro_receive(ring->napi_p, skb);
- } else {
- if (ring->vlgrp && vlan &&
- (ring->vlan_tag_strip ==
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
- vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
- else
- netif_receive_skb(skb);
- }
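+ /* Always hand packets to the GRO path; the core falls back to
+  * plain receive when NETIF_F_GRO is cleared in dev->features.
+  */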
+ if (ring->vlgrp && ext_info->vlan &&
+ (ring->vlan_tag_strip ==
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
+ vlan_gro_receive(ring->napi_p, ring->vlgrp,
+ ext_info->vlan, skb);
+ else
+ napi_gro_receive(ring->napi_p, skb);
+
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
!(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
- ring->rx_csum && /* Offload Rx side CSUM */
+ (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
vdev->config.fifo_indicate_max_pkts;
vpath->fifo.tx_vector_no = 0;
vpath->ring.rx_vector_no = 0;
- vpath->ring.rx_csum = vdev->rx_csum;
vpath->ring.rx_hwts = vdev->rx_hwts;
vpath->is_open = 1;
vdev->vp_handles[i] = vpath->handle;
- vpath->ring.gro_enable = vdev->config.gro_enable;
vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
vdev->stats.vpaths_open++;
} else {
mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
+static u32 vxge_fix_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ /* Enabling RTH requires some of the logic in vxge_device_register and a
+ * vpath reset. Due to these restrictions, only allow modification
+ * while the interface is down.
+ */
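+ /* reject the change by flipping RXHASH back to its current state */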
+ if ((changed & NETIF_F_RXHASH) && netif_running(dev))
+ features ^= NETIF_F_RXHASH;
+
+ return features;
+}
+
+static int vxge_set_features(struct net_device *dev, u32 features)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+ if (!(changed & NETIF_F_RXHASH))
+ return 0;
+
+ /* !netif_running() ensured by vxge_fix_features() */
+
+ vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
+ if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
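+ /* reset failed - flip RXHASH back in dev->features and keep
+  * the device's RTH configuration consistent with it
+  */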
+ dev->features = features ^ NETIF_F_RXHASH;
+ vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* vxge_open
* @dev: pointer to the device structure.
.ndo_do_ioctl = vxge_ioctl,
.ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu,
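+ /* runtime offload toggling, replacing the removed ethtool set_* hooks */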
+ .ndo_fix_features = vxge_fix_features,
+ .ndo_set_features = vxge_set_features,
.ndo_vlan_rx_register = vxge_vlan_rx_register,
.ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
.ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
vdev->devh = hldev;
vdev->pdev = hldev->pdev;
memcpy(&vdev->config, config, sizeof(struct vxge_config));
- vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
vdev->rx_hwts = 0;
vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
- ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
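+ /* offloads that ethtool may toggle at runtime live in hw_features */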
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_HW_VLAN_TX;
+ if (vdev->config.rth_steering != NO_STEERING)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
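+ /* VLAN RX and filtering stay out of hw_features: they are always
+  * on and therefore not user-toggleable.
+  */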
+ ndev->features |= ndev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
/* Driver entry points */
ndev->irq = vdev->pdev->irq;
ndev->base_addr = (unsigned long) hldev->bar0;
vxge_initialize_ethtool_ops(ndev);
- if (vdev->config.rth_steering != NO_STEERING) {
- ndev->features |= NETIF_F_RXHASH;
- hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
- }
-
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
no_of_vpath, GFP_KERNEL);
goto _out1;
}
- ndev->features |= NETIF_F_SG;
-
- ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s : checksuming enabled", __func__);
"%s : using High DMA", __func__);
}
- ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
-
- if (vdev->config.gro_enable)
- ndev->features |= NETIF_F_GRO;
-
ret = register_netdev(ndev);
if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
vdev->config.tx_steering_type = 0;
}
- if (vdev->config.gro_enable) {
- vxge_debug_init(VXGE_ERR,
- "%s: Generic receive offload enabled",
- vdev->ndev->name);
- } else
- vxge_debug_init(VXGE_TRACE,
- "%s: Generic receive offload disabled",
- vdev->ndev->name);
-
if (vdev->config.addr_learn_en)
vxge_debug_init(VXGE_TRACE,
"%s: MAC Address learning enabled", vdev->ndev->name);
/* set private device info */
pci_set_drvdata(pdev, hldev);
- ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
ll_config->addr_learn_en = addr_learn_en;
ll_config->rth_algorithm = RTH_ALG_JENKINS;