Merge branch 'net-tls-add-ctrl-path-tracing-and-statistics'

Jakub Kicinski says:

====================
net/tls: add ctrl path tracing and statistics

This set adds trace events related to TLS offload and basic MIB stats
for TLS.

The first patch contains the TLS offload related trace points. Those are
helpful in troubleshooting offload issues, especially around the
resync paths.

The second patch adds a tracepoint to the fast path of device offload.
It is separated out in case there are objections to adding fast path
tracepoints. Again, it is quite useful for debugging offload issues.
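
The events land under the "tls" trace system (see net/tls/trace.h at the
end of this diff), so they can be enabled like any other tracepoints.
A minimal sketch in C, assuming tracefs is mounted at /sys/kernel/tracing
(on older setups it may be under /sys/kernel/debug/tracing):

#include <stdio.h>

int main(void)
{
	/* equivalent of: echo 1 > /sys/kernel/tracing/events/tls/enable */
	FILE *f = fopen("/sys/kernel/tracing/events/tls/enable", "w");

	if (!f) {
		perror("enable tls trace events");
		return 1;
	}
	fputs("1", f);
	fclose(f);
	return 0;
}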

The next four patches add MIB statistics. The statistics are implemented
as per-CPU, per-netns counters. Since there are currently no fast path
statistics we could have used atomic variables instead; per-CPU counters
simply seem to be the more common choice.
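
As an illustration of that pattern, below is a small userspace analog in
C; it is only a sketch, with a slot per thread standing in for a slot per
CPU and all names invented here. Writers bump their own slot, and the
read side sums every slot on demand, which is what the /proc handler in
this set does via snmp_get_cpu_field_batch().

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 4	/* stand-in for the number of CPUs */

static unsigned long decrypt_error[NSLOTS];	/* one counter slot each */

static void *worker(void *arg)
{
	unsigned long slot = (unsigned long)arg;
	int i;

	/* kernel per-CPU code can use a plain increment with preemption
	 * disabled; free-running threads need the atomic here */
	for (i = 0; i < 1000; i++)
		__atomic_fetch_add(&decrypt_error[slot], 1, __ATOMIC_RELAXED);
	return NULL;
}

int main(void)
{
	pthread_t t[NSLOTS];
	unsigned long sum = 0, i;

	for (i = 0; i < NSLOTS; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (i = 0; i < NSLOTS; i++)
		pthread_join(t[i], NULL);
	for (i = 0; i < NSLOTS; i++)	/* reader sums all slots */
		sum += decrypt_error[i];
	printf("TlsDecryptError\t%lu\n", sum);	/* expect 4000 */
	return 0;
}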

The most basic statistics are the numbers of created and live sessions,
broken out into offloaded and non-offloaded. Users seem to like those a lot.

Next there is a statistic for decryption errors. It is primarily
useful for debugging device offload; in normal deployments decryption
errors should not be common.

Last but not least, there is a counter for device RX resyncs.
====================

Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2019-10-05 16:29:01 -07:00
commit 128d23c3a8
13 changed files with 425 additions and 15 deletions

Documentation/networking/tls.rst

@@ -213,3 +213,29 @@ A patchset to OpenSSL to use ktls as the record layer is
of calling send directly after a handshake using gnutls.
Since it doesn't implement a full record layer, control
messages are not supported.
Statistics
==========

TLS implementation exposes the following per-namespace statistics
(``/proc/net/tls_stat``):

- ``TlsCurrTxSw``, ``TlsCurrRxSw`` -
  number of TX and RX sessions currently installed where host handles
  cryptography

- ``TlsCurrTxDevice``, ``TlsCurrRxDevice`` -
  number of TX and RX sessions currently installed where NIC handles
  cryptography

- ``TlsTxSw``, ``TlsRxSw`` -
  number of TX and RX sessions opened with host cryptography

- ``TlsTxDevice``, ``TlsRxDevice`` -
  number of TX and RX sessions opened with NIC cryptography

- ``TlsDecryptError`` -
  record decryption failed (e.g. due to incorrect authentication tag)

- ``TlsDeviceRxResync`` -
  number of RX resyncs sent to NICs handling cryptography
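
To consume these counters programmatically, a minimal sketch in C; it
assumes only the two-column "name, value" layout produced by the
seq_printf() format ("%-32s\t%lu\n") in net/tls/tls_proc.c below, and
prints whatever values the running kernel reports:

#include <stdio.h>

int main(void)
{
	char name[64];
	unsigned long val;
	FILE *f = fopen("/proc/net/tls_stat", "r");

	if (!f) {
		/* kernel built without CONFIG_TLS, or one predating this set */
		perror("/proc/net/tls_stat");
		return 1;
	}
	while (fscanf(f, "%63s %lu", name, &val) == 2)
		printf("%s = %lu\n", name, val);
	fclose(f);
	return 0;
}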

drivers/net/ethernet/netronome/nfp/crypto/tls.c

@@ -872,7 +872,8 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
/* jump forward, a TX may have gotten lost, need to sync TX */
if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
- tls_offload_tx_resync_request(nskb->sk);
+ tls_offload_tx_resync_request(nskb->sk, seq,
+ ntls->next_seq);
*nr_frags = 0;
return nskb;

include/net/netns/mib.h

@@ -24,6 +24,9 @@ struct netns_mib {
#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
#endif
#if IS_ENABLED(CONFIG_TLS)
DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics);
#endif
};
#endif

include/net/snmp.h

@@ -111,6 +111,12 @@ struct linux_xfrm_mib {
unsigned long mibs[LINUX_MIB_XFRMMAX];
};
/* Linux TLS */
#define LINUX_MIB_TLSMAX __LINUX_MIB_TLSMAX
struct linux_tls_mib {
unsigned long mibs[LINUX_MIB_TLSMAX];
};
#define DEFINE_SNMP_STAT(type, name) \
__typeof__(type) __percpu *name
#define DEFINE_SNMP_STAT_ATOMIC(type, name) \

include/net/tls.h

@@ -43,6 +43,7 @@
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
@@ -73,6 +74,15 @@
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field) \
SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field) \
__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field) \
SNMP_DEC_STATS((net)->mib.tls_statistics, field)
enum {
TLS_BASE,
TLS_SW,
@@ -594,13 +604,6 @@ tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}
- static inline void tls_offload_tx_resync_request(struct sock *sk)
- {
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
- }
/* Driver's seq tracking has to be disabled until resync succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
@@ -612,6 +615,9 @@ static inline bool tls_offload_tx_resync_pending(struct sock *sk)
return ret;
}
int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
@@ -634,6 +640,7 @@ void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
#else
static inline void tls_device_init(void) {}

include/uapi/linux/snmp.h

@@ -323,4 +323,21 @@ enum
__LINUX_MIB_XFRMMAX
};
/* linux TLS mib definitions */
enum
{
LINUX_MIB_TLSNUM = 0,
LINUX_MIB_TLSCURRTXSW, /* TlsCurrTxSw */
LINUX_MIB_TLSCURRRXSW, /* TlsCurrRxSw */
LINUX_MIB_TLSCURRTXDEVICE, /* TlsCurrTxDevice */
LINUX_MIB_TLSCURRRXDEVICE, /* TlsCurrRxDevice */
LINUX_MIB_TLSTXSW, /* TlsTxSw */
LINUX_MIB_TLSRXSW, /* TlsRxSw */
LINUX_MIB_TLSTXDEVICE, /* TlsTxDevice */
LINUX_MIB_TLSRXDEVICE, /* TlsRxDevice */
LINUX_MIB_TLSDECRYPTERROR, /* TlsDecryptError */
LINUX_MIB_TLSRXDEVICERESYNC, /* TlsRxDeviceResync */
__LINUX_MIB_TLSMAX
};
#endif /* _LINUX_SNMP_H */

net/tls/Makefile

@@ -3,9 +3,11 @@
# Makefile for the TLS subsystem.
#
CFLAGS_trace.o := -I$(src)
obj-$(CONFIG_TLS) += tls.o
- tls-y := tls_main.o tls_sw.o
+ tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
tls-$(CONFIG_TLS_TOE) += tls_toe.o
tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o

net/tls/tls_device.c

@@ -38,6 +38,8 @@
#include <net/tcp.h>
#include <net/tls.h>
#include "trace.h"
/* device_offload_lock is used to synchronize tls_dev_add
* against NETDEV_DOWN notifications.
*/
@@ -202,6 +204,15 @@ void tls_device_free_resources_tx(struct sock *sk)
tls_free_partial_record(sk, tls_ctx);
}
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
u32 seq)
{
@@ -216,6 +227,7 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
rcd_sn = tls_ctx->tx.rec_seq;
trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
down_read(&device_offload_lock);
netdev = tls_ctx->netdev;
if (netdev)
@@ -637,15 +649,19 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
static void tls_device_resync_rx(struct tls_context *tls_ctx,
struct sock *sk, u32 seq, u8 *rcd_sn)
{
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
struct net_device *netdev;
if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
return;
trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
TLS_OFFLOAD_CTX_DIR_RX);
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
@@ -653,8 +669,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ u32 sock_data, is_req_pending;
struct tls_prot_info *prot;
- u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@ -683,8 +699,12 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
/* head of next rec is already in, note that the sock_inq will
 * include the currently parsed message when called from parser
 */
- if (tcp_inq(sk) > rcd_len)
+ sock_data = tcp_inq(sk);
+ if (sock_data > rcd_len) {
+ trace_tls_device_rx_resync_nh_delay(sk, sock_data,
+ rcd_len);
return;
+ }
rx_ctx->resync_nh_do_now = 0;
seq += rcd_len;
@@ -728,6 +748,7 @@ static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
/* head of next rec is already in, parser will sync for us */
if (tcp_inq(sk) > rxm->full_len) {
trace_tls_device_rx_resync_nh_schedule(sk);
ctx->resync_nh_do_now = 1;
} else {
struct tls_prot_info *prot = &tls_ctx->prot_info;
@@ -830,6 +851,7 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
struct strp_msg *rxm = strp_msg(skb);
int is_decrypted = skb->decrypted;
int is_encrypted = !is_decrypted;
struct sk_buff *skb_iter;
@@ -840,6 +862,10 @@ int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
is_encrypted &= !skb_iter->decrypted;
}
trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
tls_ctx->rx.rec_seq, rxm->full_len,
is_encrypted, is_decrypted);
ctx->sw.decrypted |= is_decrypted;
/* Return immediately if the record is either entirely plaintext or
@@ -1013,6 +1039,8 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
&ctx->crypto_send.info,
tcp_sk(sk)->write_seq);
trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
tcp_sk(sk)->write_seq, rec_seq, rc);
if (rc)
goto release_lock;
@@ -1049,6 +1077,7 @@
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
struct tls12_crypto_info_aes_gcm_128 *info;
struct tls_offload_context_rx *context;
struct net_device *netdev;
int rc = 0;
@@ -1096,6 +1125,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
&ctx->crypto_recv.info,
tcp_sk(sk)->copied_seq);
info = (void *)&ctx->crypto_recv.info;
trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
tcp_sk(sk)->copied_seq, info->rec_seq, rc);
if (rc)
goto free_sw_resources;

net/tls/tls_main.c

@@ -41,6 +41,7 @@
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>
#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>
@@ -285,14 +286,19 @@ static void tls_sk_proto_cleanup(struct sock *sk,
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
tls_sw_release_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
} else if (ctx->tx_conf == TLS_HW) {
tls_device_free_resources_tx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
}
- if (ctx->rx_conf == TLS_SW)
+ if (ctx->rx_conf == TLS_SW) {
tls_sw_release_resources_rx(sk);
- else if (ctx->rx_conf == TLS_HW)
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
+ } else if (ctx->rx_conf == TLS_HW) {
tls_device_offload_cleanup_rx(sk);
+ TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ }
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
@@ -533,19 +539,29 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
if (tx) {
rc = tls_set_device_offload(sk, ctx);
conf = TLS_HW;
- if (rc) {
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
+ } else {
rc = tls_set_sw_offload(sk, ctx, 1);
if (rc)
goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
conf = TLS_SW;
}
} else {
rc = tls_set_device_offload_rx(sk, ctx);
conf = TLS_HW;
- if (rc) {
+ if (!rc) {
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
+ } else {
rc = tls_set_sw_offload(sk, ctx, 0);
if (rc)
goto err_crypto_info;
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
+ TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
conf = TLS_SW;
}
tls_sw_strparser_arm(sk, ctx);
@@ -795,6 +811,35 @@ static size_t tls_get_info_size(const struct sock *sk)
return size;
}
static int __net_init tls_init_net(struct net *net)
{
int err;
net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
if (!net->mib.tls_statistics)
return -ENOMEM;
err = tls_proc_init(net);
if (err)
goto err_free_stats;
return 0;
err_free_stats:
free_percpu(net->mib.tls_statistics);
return err;
}
static void __net_exit tls_exit_net(struct net *net)
{
tls_proc_fini(net);
free_percpu(net->mib.tls_statistics);
}
static struct pernet_operations tls_proc_ops = {
.init = tls_init_net,
.exit = tls_exit_net,
};
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
.owner = THIS_MODULE,
@@ -806,6 +851,12 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
static int __init tls_register(void)
{
int err;
err = register_pernet_subsys(&tls_proc_ops);
if (err)
return err;
tls_sw_proto_ops = inet_stream_ops;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
@@ -819,6 +870,7 @@ static void __exit tls_unregister(void)
{
tcp_unregister_ulp(&tcp_tls_ulp_ops);
tls_device_cleanup();
unregister_pernet_subsys(&tls_proc_ops);
}
module_init(tls_register);

net/tls/tls_proc.c (new file, 47 lines)

@@ -0,0 +1,47 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/snmp.h>
#include <net/tls.h>
static const struct snmp_mib tls_mib_list[] = {
SNMP_MIB_ITEM("TlsCurrTxSw", LINUX_MIB_TLSCURRTXSW),
SNMP_MIB_ITEM("TlsCurrRxSw", LINUX_MIB_TLSCURRRXSW),
SNMP_MIB_ITEM("TlsCurrTxDevice", LINUX_MIB_TLSCURRTXDEVICE),
SNMP_MIB_ITEM("TlsCurrRxDevice", LINUX_MIB_TLSCURRRXDEVICE),
SNMP_MIB_ITEM("TlsTxSw", LINUX_MIB_TLSTXSW),
SNMP_MIB_ITEM("TlsRxSw", LINUX_MIB_TLSRXSW),
SNMP_MIB_ITEM("TlsTxDevice", LINUX_MIB_TLSTXDEVICE),
SNMP_MIB_ITEM("TlsRxDevice", LINUX_MIB_TLSRXDEVICE),
SNMP_MIB_ITEM("TlsDecryptError", LINUX_MIB_TLSDECRYPTERROR),
SNMP_MIB_ITEM("TlsRxDeviceResync", LINUX_MIB_TLSRXDEVICERESYNC),
SNMP_MIB_SENTINEL
};
static int tls_statistics_seq_show(struct seq_file *seq, void *v)
{
unsigned long buf[LINUX_MIB_TLSMAX] = {};
struct net *net = seq->private;
int i;
snmp_get_cpu_field_batch(buf, tls_mib_list, net->mib.tls_statistics);
for (i = 0; tls_mib_list[i].name; i++)
seq_printf(seq, "%-32s\t%lu\n", tls_mib_list[i].name, buf[i]);
return 0;
}
int __net_init tls_proc_init(struct net *net)
{
if (!proc_create_net_single("tls_stat", 0444, net->proc_net,
tls_statistics_seq_show, NULL))
return -ENOMEM;
return 0;
}
void __net_exit tls_proc_fini(struct net *net)
{
remove_proc_entry("tls_stat", net->proc_net);
}

net/tls/tls_sw.c

@@ -168,6 +168,9 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
/* Propagate if there was an err */
if (err) {
if (err == -EBADMSG)
TLS_INC_STATS(sock_net(skb->sk),
LINUX_MIB_TLSDECRYPTERROR);
ctx->async_wait.err = err;
tls_err_abort(skb->sk, err);
} else {
@@ -253,6 +256,8 @@ static int tls_do_decryption(struct sock *sk,
return ret;
ret = crypto_wait_req(ret, &ctx->async_wait);
} else if (ret == -EBADMSG) {
TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
}
if (async)

net/tls/trace.c (new file, 10 lines)

@@ -0,0 +1,10 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
#include <linux/module.h>
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif

net/tls/trace.h (new file, 202 lines)

@@ -0,0 +1,202 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tls
#if !defined(_TLS_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _TLS_TRACE_H_
#include <asm/unaligned.h>
#include <linux/tracepoint.h>
struct sock;
TRACE_EVENT(tls_device_offload_set,
TP_PROTO(struct sock *sk, int dir, u32 tcp_seq, u8 *rec_no, int ret),
TP_ARGS(sk, dir, tcp_seq, rec_no, ret),
TP_STRUCT__entry(
__field( struct sock *, sk )
__field( u64, rec_no )
__field( int, dir )
__field( u32, tcp_seq )
__field( int, ret )
),
TP_fast_assign(
__entry->sk = sk;
__entry->rec_no = get_unaligned_be64(rec_no);
__entry->dir = dir;
__entry->tcp_seq = tcp_seq;
__entry->ret = ret;
),
TP_printk(
"sk=%p direction=%d tcp_seq=%u rec_no=%llu ret=%d",
__entry->sk, __entry->dir, __entry->tcp_seq, __entry->rec_no,
__entry->ret
)
);
TRACE_EVENT(tls_device_decrypted,
TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, u32 rec_len,
bool encrypted, bool decrypted),
TP_ARGS(sk, tcp_seq, rec_no, rec_len, encrypted, decrypted),
TP_STRUCT__entry(
__field( struct sock *, sk )
__field( u64, rec_no )
__field( u32, tcp_seq )
__field( u32, rec_len )
__field( bool, encrypted )
__field( bool, decrypted )
),
TP_fast_assign(
__entry->sk = sk;
__entry->rec_no = get_unaligned_be64(rec_no);
__entry->tcp_seq = tcp_seq;
__entry->rec_len = rec_len;
__entry->encrypted = encrypted;
__entry->decrypted = decrypted;
),
TP_printk(
"sk=%p tcp_seq=%u rec_no=%llu len=%u encrypted=%d decrypted=%d",
__entry->sk, __entry->tcp_seq,
__entry->rec_no, __entry->rec_len,
__entry->encrypted, __entry->decrypted
)
);
TRACE_EVENT(tls_device_rx_resync_send,
TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no, int sync_type),
TP_ARGS(sk, tcp_seq, rec_no, sync_type),
TP_STRUCT__entry(
__field( struct sock *, sk )
__field( u64, rec_no )
__field( u32, tcp_seq )
__field( int, sync_type )
),
TP_fast_assign(
__entry->sk = sk;
__entry->rec_no = get_unaligned_be64(rec_no);
__entry->tcp_seq = tcp_seq;
__entry->sync_type = sync_type;
),
TP_printk(
"sk=%p tcp_seq=%u rec_no=%llu sync_type=%d",
__entry->sk, __entry->tcp_seq, __entry->rec_no,
__entry->sync_type
)
);
TRACE_EVENT(tls_device_rx_resync_nh_schedule,
TP_PROTO(struct sock *sk),
TP_ARGS(sk),
TP_STRUCT__entry(
__field( struct sock *, sk )
),
TP_fast_assign(
__entry->sk = sk;
),
TP_printk(
"sk=%p", __entry->sk
)
);
TRACE_EVENT(tls_device_rx_resync_nh_delay,
TP_PROTO(struct sock *sk, u32 sock_data, u32 rec_len),
TP_ARGS(sk, sock_data, rec_len),
TP_STRUCT__entry(
__field( struct sock *, sk )
__field( u32, sock_data )
__field( u32, rec_len )
),
TP_fast_assign(
__entry->sk = sk;
__entry->sock_data = sock_data;
__entry->rec_len = rec_len;
),
TP_printk(
"sk=%p sock_data=%u rec_len=%u",
__entry->sk, __entry->sock_data, __entry->rec_len
)
);
TRACE_EVENT(tls_device_tx_resync_req,
TP_PROTO(struct sock *sk, u32 tcp_seq, u32 exp_tcp_seq),
TP_ARGS(sk, tcp_seq, exp_tcp_seq),
TP_STRUCT__entry(
__field( struct sock *, sk )
__field( u32, tcp_seq )
__field( u32, exp_tcp_seq )
),
TP_fast_assign(
__entry->sk = sk;
__entry->tcp_seq = tcp_seq;
__entry->exp_tcp_seq = exp_tcp_seq;
),
TP_printk(
"sk=%p tcp_seq=%u exp_tcp_seq=%u",
__entry->sk, __entry->tcp_seq, __entry->exp_tcp_seq
)
);
TRACE_EVENT(tls_device_tx_resync_send,
TP_PROTO(struct sock *sk, u32 tcp_seq, u8 *rec_no),
TP_ARGS(sk, tcp_seq, rec_no),
TP_STRUCT__entry(
__field( struct sock *, sk )
__field( u64, rec_no )
__field( u32, tcp_seq )
),
TP_fast_assign(
__entry->sk = sk;
__entry->rec_no = get_unaligned_be64(rec_no);
__entry->tcp_seq = tcp_seq;
),
TP_printk(
"sk=%p tcp_seq=%u rec_no=%llu",
__entry->sk, __entry->tcp_seq, __entry->rec_no
)
);
#endif /* _TLS_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>