{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
+ {"lro_aggregated", IXGBE_STAT(lro_aggregated)},
+ {"lro_flushed", IXGBE_STAT(lro_flushed)},
};
#define IXGBE_QUEUE_STATS_LEN \
int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
int j, k;
int i;
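+ /* running totals of the per-ring inet_lro statistics */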
+ u64 aggregated = 0, flushed = 0, no_desc = 0;
ixgbe_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
+ aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
+ flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
+ no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
queue_stat = (u64 *)&adapter->rx_ring[j].stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
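+ /* publish the totals so the lro_* entries in ixgbe_gstrings_stats
+  * pick them up through IXGBE_STAT() */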
+ adapter->lro_aggregated = aggregated;
+ adapter->lro_flushed = flushed;
+ adapter->lro_no_desc = no_desc;
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
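+ /* generic flag ops so userspace can query and toggle NETIF_F_LRO
+  * through ethtool (ETH_FLAG_LRO) */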
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ethtool_op_set_flags,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
* ixgbe_receive_skb - Send a completed packet up the stack
* @adapter: board private structure
* @skb: packet to send up
- * @is_vlan: packet has a VLAN tag
- * @tag: VLAN tag from descriptor
+ * @status: status bits reported by the hardware for this descriptor
+ * @ring: rx descriptor ring (for a specific queue) the packet arrived on
+ * @rx_desc: rx descriptor
**/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
- struct sk_buff *skb, bool is_vlan,
- u16 tag)
+ struct sk_buff *skb, u8 status,
+ struct ixgbe_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
{
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
- if (adapter->vlgrp && is_vlan)
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
- else
- netif_receive_skb(skb);
- } else {
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
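+
+ /* Hand checksum-verified packets to the ring's inet_lro manager so
+  * consecutive TCP segments can be aggregated; everything else takes
+  * the normal receive path below. */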
+ if ((adapter->netdev->features & NETIF_F_LRO) &&
+     (skb->ip_summed == CHECKSUM_UNNECESSARY)) {
if (adapter->vlgrp && is_vlan)
- vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
+ adapter->vlgrp, tag,
+ rx_desc);
else
- netif_rx(skb);
+ lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
+ ring->lro_used = true;
+ } else {
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+ if (adapter->vlgrp && is_vlan)
+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
+ else
+ netif_receive_skb(skb);
+ } else {
+ if (adapter->vlgrp && is_vlan)
+ vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ else
+ netif_rx(skb);
+ }
}
}
struct sk_buff *skb;
unsigned int i;
u32 upper_len, len, staterr;
- u16 hdr_info, vlan_tag;
- bool is_vlan, cleaned = false;
+ u16 hdr_info;
+ bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
- is_vlan = (staterr & IXGBE_RXD_STAT_VP);
- vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
while (staterr & IXGBE_RXD_STAT_DD) {
if (*work_done >= work_to_do)
total_rx_packets++;
skb->protocol = eth_type_trans(skb, netdev);
- ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
+ ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
netdev->last_rx = jiffies;
next_desc:
rx_buffer_info = next_buffer;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- is_vlan = (staterr & IXGBE_RXD_STAT_VP);
- vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+ }
+
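+ /* flush any sessions still held by inet_lro before leaving the poll
+  * routine so aggregated frames are not delayed */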
+ if (rx_ring->lro_used) {
+ lro_flush_all(&rx_ring->lro_mgr);
+ rx_ring->lro_used = false;
}
rx_ring->next_to_clean = i;
(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+/**
+ * ixgbe_get_skb_hdr - helper function for LRO header processing
+ * @skb: pointer to sk_buff to be added to LRO packet
+ * @iphdr: pointer to ip header structure
+ * @tcph: pointer to tcp header structure
+ * @hdr_flags: pointer to header flags
+ * @priv: private data
+ **/
+static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
+ u64 *hdr_flags, void *priv)
+{
+ union ixgbe_adv_rx_desc *rx_desc = priv;
+
+ /* Verify that this is a valid IPv4 TCP packet: both the IPv4 and
+  * the TCP packet-type bits must be set in the descriptor */
+ if (!(rx_desc->wb.lower.lo_dword.pkt_info & IXGBE_RXDADV_PKTTYPE_IPV4) ||
+     !(rx_desc->wb.lower.lo_dword.pkt_info & IXGBE_RXDADV_PKTTYPE_TCP))
+ return -1;
+
+ /* Set network and transport header pointers; skb->data already points
+  * at the IP header because eth_type_trans() stripped the Ethernet header */
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, ip_hdrlen(skb));
+ *iphdr = ip_hdr(skb);
+ *tcph = tcp_hdr(skb);
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
+ return 0;
+}
+
/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
* @adapter: board private structure
adapter->rx_ring[i].tail = IXGBE_RDT(i);
}
+ /* Initial LRO settings: one inet_lro manager per rx ring */
+ adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
+ adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
+ adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
+ adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
+ adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
+ adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
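+ /* aggregated frames inherit CHECKSUM_UNNECESSARY, since only
+  * checksum-verified packets are fed to the LRO manager */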
+
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
struct pci_dev *pdev = adapter->pdev;
int size;
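+
+ /* the inet_lro manager needs an array of LRO descriptors; carve it
+  * out with vmalloc, like the rx_buffer_info array below */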
+ size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
+ rxdr->lro_mgr.lro_arr = vmalloc(size);
+ if (!rxdr->lro_mgr.lro_arr)
+ return -ENOMEM;
+ memset(rxdr->lro_mgr.lro_arr, 0, size);
+
size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
rxdr->rx_buffer_info = vmalloc(size);
if (!rxdr->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
- return -ENOMEM;
+ goto alloc_failed;
}
memset(rxdr->rx_buffer_info, 0, size);
DPRINTK(PROBE, ERR,
"Memory allocation failed for the rx desc ring\n");
vfree(rxdr->rx_buffer_info);
- return -ENOMEM;
+ goto alloc_failed;
}
rxdr->next_to_clean = 0;
rxdr->next_to_use = 0;
return 0;
+
+alloc_failed:
+ vfree(rxdr->lro_mgr.lro_arr);
+ rxdr->lro_mgr.lro_arr = NULL;
+ return -ENOMEM;
}
/**
{
struct pci_dev *pdev = adapter->pdev;
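+ /* free the LRO descriptor array allocated in
+  * ixgbe_setup_rx_resources() */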
+ vfree(rx_ring->lro_mgr.lro_arr);
+ rx_ring->lro_mgr.lro_arr = NULL;
+
ixgbe_clean_rx_ring(adapter, rx_ring);
vfree(rx_ring->rx_buffer_info);
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
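+ /* LRO is enabled by default; users can clear NETIF_F_LRO again
+  * through the ethtool set_flags hook */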
+ netdev->features |= NETIF_F_LRO;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;