Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-24 15:11:48 +07:00)
net/mlx5e: Return bool from TLS and IPSEC offloads
TLS and IPSEC offloads currently return struct sk_buff *, but the value is
either NULL or the same skb that was passed as a parameter. Return bool
instead to provide stronger guarantees to the calling code (it won't need to
support handling a different SKB that could be potentially returned before
this change) and to simplify restructuring this code in the following
commits.

Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit f02bac9ad6
parent 76cd622fe2
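To make the calling-convention change concrete, here is a minimal,
self-contained userspace sketch of the before/after pattern. The
struct sk_buff stand-in and the handle_tx_old()/handle_tx_new() helpers
are toy illustrations, not driver code; only the shape of the change
mirrors the patch below.

/* Toy model of the return-convention change in this commit.
 * None of these names exist in the driver; they only model the pattern. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sk_buff { int len; };	/* stand-in for the kernel's sk_buff */

/* Old convention: return the skb on success, or NULL after freeing it.
 * The returned pointer is always either NULL or the skb passed in. */
static struct sk_buff *handle_tx_old(struct sk_buff *skb)
{
	if (skb->len == 0) {	/* drop path: consume the skb */
		free(skb);
		return NULL;
	}
	return skb;		/* never a different skb */
}

/* New convention: return bool. The caller's pointer is left untouched,
 * so the caller no longer has to handle a potentially different skb. */
static bool handle_tx_new(struct sk_buff *skb)
{
	if (skb->len == 0) {
		free(skb);
		return false;
	}
	return true;
}

int main(void)
{
	struct sk_buff *a = malloc(sizeof(*a));
	struct sk_buff *b = malloc(sizeof(*b));

	a->len = 100;
	b->len = 100;

	/* Before: the caller had to reassign its own skb pointer. */
	a = handle_tx_old(a);
	if (a) {
		printf("old: transmit skb of len %d\n", a->len);
		free(a);
	}

	/* After: a simple predicate, as in mlx5e_xmit() in this patch. */
	if (handle_tx_new(b)) {
		printf("new: transmit skb of len %d\n", b->len);
		free(b);
	}
	return 0;
}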
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,33 +102,30 @@ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 	udp_hdr(skb)->len = htons(payload_len);
 }
 
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
-		      struct mlx5e_txqsq *sq,
-		      struct net_device *dev,
-		      struct mlx5e_tx_wqe **wqe,
-		      u16 *pi)
+static inline bool mlx5e_accel_handle_tx(struct sk_buff *skb,
+					 struct mlx5e_txqsq *sq,
+					 struct net_device *dev,
+					 struct mlx5e_tx_wqe **wqe,
+					 u16 *pi)
 {
 #ifdef CONFIG_MLX5_EN_TLS
 	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
-		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
-		if (unlikely(!skb))
-			return NULL;
+		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi)))
+			return false;
 	}
 #endif
 
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
-		skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
-		if (unlikely(!skb))
-			return NULL;
+		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb)))
+			return false;
 	}
 #endif
 
 	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
 		mlx5e_udp_gso_handle_tx_skb(skb);
 
-	return skb;
+	return true;
 }
 
 #endif /* __MLX5E_EN_ACCEL_H__ */
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -233,9 +233,9 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
 			 ntohs(mdata->content.tx.seq));
 }
 
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-					  struct mlx5e_tx_wqe *wqe,
-					  struct sk_buff *skb)
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+			       struct mlx5e_tx_wqe *wqe,
+			       struct sk_buff *skb)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct xfrm_offload *xo = xfrm_offload(skb);
@@ -245,7 +245,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
 	struct sec_path *sp;
 
 	if (!xo)
-		return skb;
+		return true;
 
 	sp = skb_sec_path(skb);
 	if (unlikely(sp->len != 1)) {
@@ -281,11 +281,11 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
 	sa_entry->set_iv_op(skb, x, xo);
 	mlx5e_ipsec_set_metadata(skb, mdata, xo);
 
-	return skb;
+	return true;
 
 drop:
 	kfree_skb(skb);
-	return NULL;
+	return false;
 }
 
 static inline struct xfrm_state *
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -52,9 +52,9 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
 			    struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
 			struct xfrm_offload *xo);
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-					  struct mlx5e_tx_wqe *wqe,
-					  struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+			       struct mlx5e_tx_wqe *wqe,
+			       struct sk_buff *skb);
 
 #endif /* CONFIG_MLX5_EN_IPSEC */
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -95,10 +95,9 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
 
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-					 struct mlx5e_txqsq *sq,
-					 struct sk_buff *skb,
-					 struct mlx5e_tx_wqe **wqe, u16 *pi);
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			      struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			      u16 *pi);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
 					   struct mlx5e_tx_wqe_info *wi,
 					   u32 *dma_fifo_cc);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -413,10 +413,9 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	return MLX5E_KTLS_SYNC_FAIL;
 }
 
-struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
-					 struct mlx5e_txqsq *sq,
-					 struct sk_buff *skb,
-					 struct mlx5e_tx_wqe **wqe, u16 *pi)
+bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			      struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			      u16 *pi)
 {
 	struct mlx5e_ktls_offload_context_tx *priv_tx;
 	struct mlx5e_sq_stats *stats = sq->stats;
@@ -474,9 +473,9 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 	stats->tls_encrypted_bytes += datalen;
 
 out:
-	return skb;
+	return true;
 
 err_out:
 	dev_kfree_skb_any(skb);
-	return NULL;
+	return false;
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -184,12 +184,10 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
 	nskb->queue_mapping = skb->queue_mapping;
 }
 
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
-		     struct mlx5e_txqsq *sq, struct sk_buff *skb,
-		     struct mlx5e_tx_wqe **wqe,
-		     u16 *pi,
-		     struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+				 struct mlx5e_txqsq *sq, struct sk_buff *skb,
+				 struct mlx5e_tx_wqe **wqe, u16 *pi,
+				 struct mlx5e_tls *tls)
 {
 	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
 	struct sync_info info;
@@ -217,7 +215,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 	if (likely(payload <= -info.sync_len))
 		/* SKB payload doesn't require offload
 		 */
-		return skb;
+		return true;
 
 	atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
 	goto err_out;
@@ -250,18 +248,16 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
 	mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
 	*pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
 	*wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
-	return skb;
+	return true;
 
 err_out:
 	dev_kfree_skb_any(skb);
-	return NULL;
+	return false;
 }
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-					struct mlx5e_txqsq *sq,
-					struct sk_buff *skb,
-					struct mlx5e_tx_wqe **wqe,
-					u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			     struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			     u16 *pi)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_tls_offload_context_tx *context;
@@ -270,41 +266,35 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
 	int datalen;
 	u32 skb_seq;
 
-	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
-		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
-		goto out;
-	}
+	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+		return mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
 
 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
-		goto out;
+		return true;
 
 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
 	if (!datalen)
-		goto out;
+		return true;
 
 	tls_ctx = tls_get_ctx(skb->sk);
 	if (unlikely(tls_ctx->netdev != netdev))
-		goto out;
+		return true;
 
 	skb_seq = ntohl(tcp_hdr(skb)->seq);
 	context = mlx5e_get_tls_tx_context(tls_ctx);
 	expected_seq = context->expected_seq;
 
-	if (unlikely(expected_seq != skb_seq)) {
-		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
-		goto out;
-	}
+	if (unlikely(expected_seq != skb_seq))
+		return mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
 
 	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
 		atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
 		dev_kfree_skb_any(skb);
-		skb = NULL;
-		goto out;
+		return false;
 	}
 
 	context->expected_seq = skb_seq + datalen;
-out:
-	return skb;
+	return true;
 }
 
 static int tls_update_resync_sn(struct net_device *netdev,
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -40,11 +40,9 @@
 #include "en.h"
 #include "en/txrx.h"
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-					struct mlx5e_txqsq *sq,
-					struct sk_buff *skb,
-					struct mlx5e_tx_wqe **wqe,
-					u16 *pi);
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			     struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
+			     u16 *pi);
 
 void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
 			     u32 *cqe_bcnt);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -394,8 +394,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 
 	/* might send skbs and update wqe and pi */
-	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
-	if (unlikely(!skb))
+	if (unlikely(!mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi)))
 		return NETDEV_TX_OK;
 
 	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());