net/mlx5e: kTLS, Fix corner-case checks in TX resync flow

There are the following cases (see the sketch after the list):

1. Packet ends before start marker: bypass offload.
2. Packet starts before start marker and ends after it: drop,
   not supported, breaks contract with kernel.
3. Packet ends before the TLS record info starts: drop,
   this packet was already acknowledged and its record info
   was released.
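
A sketch of how the new check in tx_sync_info_get() maps these cases to
return values (an annotated paraphrase of the hunk further below, not the
verbatim driver code):

	ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));

	if (tls_record_is_start_marker(record))
		/* Case 1 (skip offload) when the packet ends before the
		 * start marker, case 2 (drop) when it crosses it.
		 */
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA
				  : MLX5E_KTLS_SYNC_FAIL;
	else if (ends_before)
		/* Case 3 (drop): the data was already acknowledged and its
		 * record info released.
		 */
		ret = MLX5E_KTLS_SYNC_FAIL;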

Add the list of cases above as a comment in the code.

To handle possible wraparound of the TCP sequence number, replace the
plain comparison with a call to the before() helper.
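
For reference, before() from include/net/tcp.h compares 32-bit sequence
numbers modulo 2^32, so the check stays correct when tcp_seq + datalen
wraps past zero; roughly:

	static inline bool before(__u32 seq1, __u32 seq2)
	{
		return (__s32)(seq1 - seq2) < 0;
	}

The unsigned subtraction followed by a signed test treats seq1 as "before"
seq2 whenever it trails it by less than 2^31.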

In addition, remove the logic that handled negative sync_len values, as
such values are no longer possible; change the field type from s32 to u32
accordingly.

Fixes: d2ead1f360 ("net/mlx5e: Add kTLS TX HW offload support")
Fixes: 46a3ea9807 ("net/mlx5e: kTLS, Enhance TX resync flow")
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Reviewed-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit ffbd9ca94e
parent 3b83b6c2e0
Author:    Tariq Toukan <tariqt@mellanox.com>
Date:      2020-01-12 16:22:14 +02:00
Committer: Saeed Mahameed <saeedm@mellanox.com>

@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
 
 struct tx_sync_info {
 	u64 rcd_sn;
-	s32 sync_len;
+	u32 sync_len;
 	int nr_frags;
 	skb_frag_t frags[MAX_SKB_FRAGS];
 };
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
 
 static enum mlx5e_ktls_sync_retval
 tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-		 u32 tcp_seq, struct tx_sync_info *info)
+		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
 {
 	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
 	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
 	struct tls_record_info *record;
 	int remaining, i = 0;
 	unsigned long flags;
+	bool ends_before;
 
 	spin_lock_irqsave(&tx_ctx->lock, flags);
 	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
 		goto out;
 	}
 
-	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-		ret = tls_record_is_start_marker(record) ?
-		      MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+	/* There are the following cases:
+	 * 1. packet ends before start marker: bypass offload.
+	 * 2. packet starts before start marker and ends after it: drop,
+	 *    not supported, breaks contract with kernel.
+	 * 3. packet ends before tls record info starts: drop,
+	 *    this packet was already acknowledged and its record info
+	 *    was released.
+	 */
+	ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+	if (unlikely(tls_record_is_start_marker(record))) {
+		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+		goto out;
+	} else if (ends_before) {
+		ret = MLX5E_KTLS_SYNC_FAIL;
 		goto out;
 	}
 
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	u8 num_wqebbs;
 	int i = 0;
 
-	ret = tx_sync_info_get(priv_tx, seq, &info);
+	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
 	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
 		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
 			stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 		goto err_out;
 	}
 
-	if (unlikely(info.sync_len < 0)) {
-		if (likely(datalen <= -info.sync_len))
-			return MLX5E_KTLS_SYNC_DONE;
-
-		stats->tls_drop_bypass_req++;
-		goto err_out;
-	}
-
 	stats->tls_ooo++;
 
 	tx_post_resync_params(sq, priv_tx, info.rcd_sn);