u16 *pi)
{
#ifdef CONFIG_MLX5_EN_TLS
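+ /* TISN of the kTLS connection; stays 0 for non-kTLS traffic. */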
+ u32 tls_tisn = 0;
+
if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
- if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi)))
+ /* May send SKBs and post WQEs. */
+ if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &tls_tisn)))
return false;
}
+
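+ /* Fetch pi/wqe only after TLS handling, which may have advanced sq->pc. */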
+ *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
+
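+ /* Program the TISN from the handler (0 when kTLS is not used). */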
+ (*wqe)->ctrl.tisn = cpu_to_be32(tls_tisn << 8);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
- u16 *pi, int datalen);
+ struct sk_buff *skb, u32 *tisn, int datalen);
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
}
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
- u16 *pi, int datalen)
+ struct sk_buff *skb, u32 *tisn, int datalen)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_sq_stats *stats = sq->stats;
- struct mlx5_wqe_ctrl_seg *cseg;
u32 seq;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
stats->tls_ctx++;
}
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
- *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
/* fall-through */
- default: /* MLX5E_KTLS_SYNC_FAIL */
+ case MLX5E_KTLS_SYNC_FAIL:
goto err_out;
}
}
priv_tx->expected_seq = seq + datalen;
- cseg = &(*wqe)->ctrl;
- cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
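+ /* Report the TISN; the caller writes it into the WQE ctrl segment. */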
+ *tisn = priv_tx->tisn;
stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
stats->tls_encrypted_bytes += datalen;
static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe **wqe, u16 *pi,
struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
+ u16 pi;
int i;
sq->stats->tls_ooo++;
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
- *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
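+ /* Fetch pi/wqe locally for the resync skb; the caller no longer passes them in. */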
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+
return true;
err_out:
}
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
- u16 *pi)
+ struct sk_buff *skb, u32 *tisn)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_tls_offload_context_tx *context;
goto err_out;
if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
- return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, wqe, pi, datalen);
+ return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, tisn, datalen);
skb_seq = ntohl(tcp_hdr(skb)->seq);
context = mlx5e_get_tls_tx_context(tls_ctx);
expected_seq = context->expected_seq;
if (unlikely(expected_seq != skb_seq))
- return mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
+ return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
#include "en/txrx.h"
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
- struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
- u16 *pi);
+ struct sk_buff *skb, u32 *tisn);
void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
u32 *cqe_bcnt);