#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
-#include "en_accel/rxtx.h"
#include "en.h"
-static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb,
- struct mlx5e_txqsq *sq,
- struct net_device *dev,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+static inline void
+mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+{
+ int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ udp_hdr(skb)->len = htons(payload_len);
+}
+
+static inline struct sk_buff *
+mlx5e_accel_handle_tx(struct sk_buff *skb,
+ struct mlx5e_txqsq *sq,
+ struct net_device *dev,
+ struct mlx5e_tx_wqe **wqe,
+ u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
		if (unlikely(!skb))
			return NULL;
	}
#endif
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
- skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi);
- if (unlikely(!skb))
- return NULL;
- }
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ mlx5e_udp_gso_handle_tx_skb(skb);
return skb;
}
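
For context: the helper above runs inside mlx5e_accel_handle_tx on the TX fast path, i.e. from mlx5e_xmit before the WQE is built. Under GSO_PARTIAL the stack segments any non-multiple payload tail in software before the skb ever reaches the driver, so every segment the hardware emits carries exactly gso_size bytes of UDP payload, and a single store to udp_hdr->len is valid for all of them. A minimal standalone sketch of that invariant (illustrative values, not driver code):

/* Sketch: with GSO_PARTIAL the driver sees a payload that is an exact
 * multiple of gso_size, so one UDP length value fits every segment. */
#include <arpa/inet.h>  /* htons */
#include <stdio.h>

#define UDP_HDRLEN 8    /* sizeof(struct udphdr) */

int main(void)
{
	unsigned int gso_size = 1400;          /* payload bytes per segment */
	unsigned int payload  = 3 * gso_size;  /* stack guarantees a multiple */
	unsigned int wire_len = htons(gso_size + UDP_HDRLEN);

	printf("%u segments, shared udp_hdr->len = 0x%04x\n",
	       payload / gso_size, wire_len);
	return 0;
}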
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c
+++ /dev/null
-#include "en_accel/rxtx.h"
-
-static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb,
- struct sk_buff *nskb,
- int remaining)
-{
- int bytes_needed = remaining, remaining_headlen, remaining_page_offset;
- int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
- int payload_len = remaining + sizeof(struct udphdr);
- int k = 0, i, j;
-
- skb_copy_bits(skb, 0, nskb->data, headlen);
- nskb->dev = skb->dev;
- skb_reset_mac_header(nskb);
- skb_set_network_header(nskb, skb_network_offset(skb));
- skb_set_transport_header(nskb, skb_transport_offset(skb));
- skb_set_tail_pointer(nskb, headlen);
-
- /* How many frags do we need? */
- for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
- bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
- k++;
- if (bytes_needed <= 0)
- break;
- }
-
- /* Fill the first frag and split it if necessary */
- j = skb_shinfo(skb)->nr_frags - k;
- remaining_page_offset = -bytes_needed;
- skb_fill_page_desc(nskb, 0,
- skb_shinfo(skb)->frags[j].page.p,
- skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset,
- skb_shinfo(skb)->frags[j].size - remaining_page_offset);
-
- skb_frag_ref(skb, j);
-
- /* Fill the rest of the frags */
- for (i = 1; i < k; i++) {
- j = skb_shinfo(skb)->nr_frags - k + i;
-
- skb_fill_page_desc(nskb, i,
- skb_shinfo(skb)->frags[j].page.p,
- skb_shinfo(skb)->frags[j].page_offset,
- skb_shinfo(skb)->frags[j].size);
- skb_frag_ref(skb, j);
- }
- skb_shinfo(nskb)->nr_frags = k;
-
- remaining_headlen = remaining - skb->data_len;
-
- /* headlen contains remaining data? */
- if (remaining_headlen > 0)
- skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen,
- remaining_headlen);
- nskb->len = remaining + headlen;
- nskb->data_len = payload_len - sizeof(struct udphdr) +
- max_t(int, 0, remaining_headlen);
- nskb->protocol = skb->protocol;
- if (nskb->protocol == htons(ETH_P_IP)) {
- ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) +
- skb_shinfo(skb)->gso_segs);
- ip_hdr(nskb)->tot_len =
- htons(payload_len + sizeof(struct iphdr));
- } else {
- ipv6_hdr(nskb)->payload_len = htons(payload_len);
- }
- udp_hdr(nskb)->len = htons(payload_len);
- skb_shinfo(nskb)->gso_size = 0;
- nskb->ip_summed = skb->ip_summed;
- nskb->csum_start = skb->csum_start;
- nskb->csum_offset = skb->csum_offset;
- nskb->queue_mapping = skb->queue_mapping;
-}
-
-/* might send skbs and update wqe and pi */
-struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe,
- u16 *pi)
-{
- int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
- int headlen = skb_transport_offset(skb) + sizeof(struct udphdr);
- int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size;
- struct sk_buff *nskb;
-
- if (skb->protocol == htons(ETH_P_IP))
- ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr));
- else
- ipv6_hdr(skb)->payload_len = htons(payload_len);
- udp_hdr(skb)->len = htons(payload_len);
- if (!remaining)
- return skb;
-
- sq->stats->udp_seg_rem++;
- nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC);
- if (unlikely(!nskb)) {
- sq->stats->dropped++;
- return NULL;
- }
-
- mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining);
-
- skb_shinfo(skb)->gso_segs--;
- pskb_trim(skb, skb->len - remaining);
- mlx5e_sq_xmit(sq, skb, *wqe, *pi);
- mlx5e_sq_fetch_wqe(sq, wqe, pi);
- return nskb;
-}
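
All of the above is removed: it was the driver-local software fallback that allocated a separate skb (nskb) for a payload tail smaller than gso_size, copied the headers and page frags into it, patched its IP/UDP headers, and transmitted the trimmed original first. With the switch to GSO_PARTIAL the core stack splits off that tail itself, so none of this bookkeeping is needed; the `!remaining` early return in the deleted mlx5e_udp_gso_handle_tx_skb is exactly the case GSO_PARTIAL now guarantees. A standalone sketch of the remainder arithmetic the old path revolved around (values are illustrative, not driver code):

#include <stdio.h>

int main(void)
{
	int gso_size = 1400;
	int headlen  = 14 + 20 + 8;        /* eth + ipv4 + udp headers */
	int skb_len  = headlen + 3000;     /* payload not a multiple of MSS */
	int remaining = (skb_len - headlen) % gso_size;

	/* Old path: split 'remaining' bytes into nskb, send trimmed skb. */
	printf("full segments: %d, trailing bytes: %d\n",
	       (skb_len - headlen) / gso_size, remaining);
	return 0;
}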
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
- netdev->hw_features |= NETIF_F_GSO_PARTIAL;
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
- netdev->features |= NETIF_F_GSO_UDP_L4;
- netdev->hw_features |= NETIF_F_GSO_UDP_L4;
-
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
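
Advertising NETIF_F_GSO_UDP_L4 through gso_partial_features is what allows the stack to hand a single large UDP skb down to the NIC. From userspace this path is exercised via the UDP_SEGMENT socket option (merged in the same kernel cycle as this driver change). A hedged sketch, with illustrative destination and sizes:

#include <arpa/inet.h>      /* htons, htonl */
#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOL_UDP
#define SOL_UDP 17          /* from linux/udp.h */
#endif
#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103     /* from linux/udp.h, kernel >= 4.18 */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	uint16_t gso_size = 1400;            /* payload bytes per segment */
	char buf[4200] = {0};                /* exactly three segments */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(9999),   /* illustrative destination */
	};

	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	/* Every send on this socket is now segmented into gso_size chunks;
	 * with NETIF_F_GSO_UDP_L4 that split can be offloaded to the NIC. */
	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size));
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
	write(fd, buf, sizeof(buf));         /* leaves as 3 UDP datagrams */
	close(fd);
	return 0;
}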