net/mlx5e: kTLS, Add kTLS RX stats
Add global and per-channel ethtool SW stats for the device offload.
Document the new counters in tls-offload.rst.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit 76c1e1ac2a
parent 0419d8c9d8
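The counters are kept per RX channel (per RQ) and folded into the global software stats that ethtool reports. Below is a rough, standalone illustration of that relationship only; the struct and field names are made up for the sketch and are not the driver's types (the real structures appear in the stats hunks further down).

/*
 * Minimal sketch: per-channel RQ counters summed into global SW stats,
 * mirroring the aggregation pattern this patch follows. Illustrative
 * names only, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_CHANNELS 4

struct rq_tls_stats {                   /* per-channel (per-RQ) counters */
        uint64_t tls_decrypted_packets;
        uint64_t tls_decrypted_bytes;
        uint64_t tls_resync_req_pkt;
};

struct sw_tls_stats {                   /* global counters shown by ethtool */
        uint64_t rx_tls_decrypted_packets;
        uint64_t rx_tls_decrypted_bytes;
        uint64_t rx_tls_resync_req_pkt;
};

static void fold_stats(struct sw_tls_stats *s,
                       const struct rq_tls_stats rq[], int n)
{
        for (int i = 0; i < n; i++) {
                /* each global counter is the sum over all channels */
                s->rx_tls_decrypted_packets += rq[i].tls_decrypted_packets;
                s->rx_tls_decrypted_bytes   += rq[i].tls_decrypted_bytes;
                s->rx_tls_resync_req_pkt    += rq[i].tls_resync_req_pkt;
        }
}

int main(void)
{
        struct rq_tls_stats rq[NUM_CHANNELS] = {
                { 10, 14600, 1 }, { 7, 9500, 0 }, { 0, 0, 0 }, { 3, 4100, 2 },
        };
        struct sw_tls_stats s = { 0 };

        fold_stats(&s, rq, NUM_CHANNELS);
        printf("rx_tls_decrypted_packets: %llu\n",
               (unsigned long long)s.rx_tls_decrypted_packets);
        return 0;
}

With the patch applied, the new rx_tls_* counters should show up next to the existing tx_tls_* ones in the output of ethtool -S on the interface, with the per-channel variants coming from the rq_stats descriptors added below.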
@@ -428,6 +428,24 @@ by the driver:
   which were part of a TLS stream.
 * ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets
   which were successfully decrypted.
+* ``rx_tls_ctx`` - number of TLS RX HW offload contexts added to device for
+  decryption.
+* ``rx_tls_del`` - number of TLS RX HW offload contexts deleted from device
+  (connection has finished).
+* ``rx_tls_resync_req_pkt`` - number of received TLS packets with a resync
+  request.
+* ``rx_tls_resync_req_start`` - number of times the TLS async resync request
+  was started.
+* ``rx_tls_resync_req_end`` - number of times the TLS async resync request
+  properly ended with providing the HW tracked tcp-seq.
+* ``rx_tls_resync_req_skip`` - number of times the TLS async resync request
+  procedure was started but not properly ended.
+* ``rx_tls_resync_res_ok`` - number of times the TLS resync response call to
+  the driver was successfully handled.
+* ``rx_tls_resync_res_skip`` - number of times the TLS resync response call to
+  the driver was terminated unsuccessfully.
+* ``rx_tls_err`` - number of RX packets which were part of a TLS stream
+  but were not decrypted due to unexpected error in the state machine.
 * ``tx_tls_encrypted_packets`` - number of TX packets passed to the device
   for encryption of their TLS payload.
 * ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
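The rx_tls_resync_req_* counters above trace one pass through the device-assisted async resync flow: a packet carries a resync request, the driver starts an async request against the HW record tracker, and the request either ends with the HW-tracked tcp-seq or is skipped. A condensed sketch of that accounting follows; the helper functions are hypothetical stand-ins for the driver's GET_PSV query, and the real code is in the ktls RX hunks below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_resync_stats {
        uint64_t tls_resync_req_pkt;    /* packets carrying a resync request */
        uint64_t tls_resync_req_start;  /* async requests started */
        uint64_t tls_resync_req_end;    /* ended with the HW tracked tcp-seq */
        uint64_t tls_resync_req_skip;   /* started but not properly ended */
};

/* Hypothetical stand-ins for querying the HW record tracker state. */
static bool hw_tracker_is_tracking(void) { return true; }
static uint32_t hw_tracked_tcp_seq(void) { return 0x1234abcd; }

static void handle_resync_request(struct rx_resync_stats *stats)
{
        stats->tls_resync_req_pkt++;    /* CQE flagged a resync request */
        stats->tls_resync_req_start++;  /* async request toward the HW tracker */

        if (!hw_tracker_is_tracking()) {
                stats->tls_resync_req_skip++;   /* tracker state unusable, abort */
                return;
        }

        uint32_t seq = hw_tracked_tcp_seq();    /* would be fed back to TLS */
        stats->tls_resync_req_end++;
        printf("resync completed at tcp-seq 0x%x\n", seq);
}

int main(void)
{
        struct rx_resync_stats stats = { 0 };

        handle_resync_request(&stats);
        return 0;
}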
@@ -46,6 +46,7 @@ struct mlx5e_ktls_offload_context_rx {
         struct tls12_crypto_info_aes_gcm_128 crypto_info;
         struct accel_rule rule;
         struct sock *sk;
+        struct mlx5e_rq_stats *stats;
         struct completion add_ctx;
         u32 tirn;
         u32 key_id;
@@ -203,6 +204,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
         return err;
 
 err_out:
+        priv_rx->stats->tls_resync_req_skip++;
         err = PTR_ERR(cseg);
         complete(&priv_rx->add_ctx);
         goto unlock;
@@ -296,6 +298,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
         return cseg;
 
 err_out:
+        priv_rx->stats->tls_resync_req_skip++;
         return ERR_PTR(err);
 }
 
@@ -362,11 +365,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 
         cseg = post_static_params(sq, priv_rx);
         if (IS_ERR(cseg)) {
+                priv_rx->stats->tls_resync_res_skip++;
                 err = PTR_ERR(cseg);
                 goto unlock;
         }
         /* Do not increment priv_rx refcnt, CQE handling is empty */
         mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+        priv_rx->stats->tls_resync_res_ok++;
 unlock:
         spin_unlock(&c->async_icosq_lock);
 
@@ -396,11 +401,14 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
         tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
         auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
         if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
-            auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD)
+            auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
+                priv_rx->stats->tls_resync_req_skip++;
                 goto out;
+        }
 
         hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
         tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+        priv_rx->stats->tls_resync_req_end++;
 out:
         refcount_dec(&resync->refcnt);
         kfree(buf);
@@ -479,6 +487,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
         seq = th->seq;
         datalen = skb->len - depth;
         tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+        rq->stats->tls_resync_req_start++;
 }
 
 void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
@@ -509,18 +518,25 @@ void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
                               struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
 {
         u8 tls_offload = get_cqe_tls_offload(cqe);
+        struct mlx5e_rq_stats *stats;
 
         if (likely(tls_offload == CQE_TLS_OFFLOAD_NOT_DECRYPTED))
                 return;
 
+        stats = rq->stats;
+
         switch (tls_offload) {
         case CQE_TLS_OFFLOAD_DECRYPTED:
                 skb->decrypted = 1;
+                stats->tls_decrypted_packets++;
+                stats->tls_decrypted_bytes += *cqe_bcnt;
                 break;
         case CQE_TLS_OFFLOAD_RESYNC:
+                stats->tls_resync_req_pkt++;
                 resync_update_sn(rq, skb);
                 break;
         default: /* CQE_TLS_OFFLOAD_ERROR: */
+                stats->tls_err++;
                 break;
         }
 }
@@ -562,12 +578,14 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 
         priv_rx->crypto_info =
                 *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-        priv_rx->sk = sk;
-        priv_rx->rxq = mlx5e_accel_sk_get_rxq(sk);
 
+        rxq = mlx5e_accel_sk_get_rxq(sk);
+        priv_rx->rxq = rxq;
+        priv_rx->sk = sk;
+
+        priv_rx->stats = &priv->channel_stats[rxq].rq;
         mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
-        rxq = priv_rx->rxq;
         rqtn = priv->direct_tir[rxq].rqt.rqtn;
 
         err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
@@ -586,6 +604,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
         if (err)
                 goto err_post_wqes;
 
+        priv_rx->stats->tls_ctx++;
+
         return 0;
 
 err_post_wqes:
@@ -646,6 +666,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
         refcount_dec(&resync->refcnt);
         wait_for_resync(netdev, resync);
 
+        priv_rx->stats->tls_del++;
         if (priv_rx->rule.rule)
                 mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
 
@@ -163,6 +163,19 @@ static const struct counter_desc sw_stats_desc[] = {
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
+        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
+#endif
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
         { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -275,6 +288,19 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
                 s->rx_congst_umr += rq_stats->congst_umr;
                 s->rx_arfs_err += rq_stats->arfs_err;
                 s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_MLX5_EN_TLS
+                s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
+                s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
+                s->rx_tls_ctx += rq_stats->tls_ctx;
+                s->rx_tls_del += rq_stats->tls_del;
+                s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
+                s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
+                s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
+                s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
+                s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
+                s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
+                s->rx_tls_err += rq_stats->tls_err;
+#endif
                 s->ch_events += ch_stats->events;
                 s->ch_poll += ch_stats->poll;
                 s->ch_arm += ch_stats->arm;
@@ -1475,6 +1501,19 @@ static const struct counter_desc rq_stats_desc[] = {
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
         { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
+        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
+#endif
 };
 
 static const struct counter_desc sq_stats_desc[] = {
@@ -186,6 +186,18 @@ struct mlx5e_sw_stats {
         u64 tx_tls_skip_no_sync_data;
         u64 tx_tls_drop_no_sync_data;
         u64 tx_tls_drop_bypass_req;
+
+        u64 rx_tls_decrypted_packets;
+        u64 rx_tls_decrypted_bytes;
+        u64 rx_tls_ctx;
+        u64 rx_tls_del;
+        u64 rx_tls_resync_req_pkt;
+        u64 rx_tls_resync_req_start;
+        u64 rx_tls_resync_req_end;
+        u64 rx_tls_resync_req_skip;
+        u64 rx_tls_resync_res_ok;
+        u64 rx_tls_resync_res_skip;
+        u64 rx_tls_err;
 #endif
 
         u64 rx_xsk_packets;
@@ -305,6 +317,19 @@ struct mlx5e_rq_stats {
         u64 congst_umr;
         u64 arfs_err;
         u64 recover;
+#ifdef CONFIG_MLX5_EN_TLS
+        u64 tls_decrypted_packets;
+        u64 tls_decrypted_bytes;
+        u64 tls_ctx;
+        u64 tls_del;
+        u64 tls_resync_req_pkt;
+        u64 tls_resync_req_start;
+        u64 tls_resync_req_end;
+        u64 tls_resync_req_skip;
+        u64 tls_resync_res_ok;
+        u64 tls_resync_res_skip;
+        u64 tls_err;
+#endif
 };
 
 struct mlx5e_sq_stats {