mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-24 17:47:38 +07:00)
Merge branch 'u64_stats_t'
Eric Dumazet says:

====================
net: introduce u64_stats_t

KCSAN found a data-race in per-cpu u64 stats accounting. (The stack traces are included in the 8th patch: "tun: switch to u64_stats_t".)

This patch series first consolidates code in five patches. Then the last three patches address the data-race resolution.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9dfd871481
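Why KCSAN complains here: on 64-bit kernels, u64_stats_update_begin()/u64_stats_fetch_begin() compile to nothing, because the seqcount inside struct u64_stats_sync exists only on 32-bit, where a 64-bit counter cannot be loaded or stored in one instruction. The old style therefore left plain, unmarked u64 loads and stores that a writer and a concurrent reader can interleave, and that the compiler is free to tear or fuse. A minimal sketch of the racy shape being fixed (illustrative names, not code from one specific driver):

struct old_pcpu_stats {
        u64 bytes;                      /* plain u64 counter */
        struct u64_stats_sync syncp;    /* empty struct on 64-bit */
};

/* writer, on the CPU owning this per-cpu slot */
u64_stats_update_begin(&s->syncp);      /* no-op on 64-bit */
s->bytes += len;                        /* unmarked read-modify-write */
u64_stats_update_end(&s->syncp);        /* no-op on 64-bit */

/* reader, possibly on another CPU, at the same time */
snap = s->bytes;                        /* unmarked load: data race */

The series wraps such counters in a new u64_stats_t type so that, on 64-bit, updates and reads go through local64_t and each access stays a single untorn instruction, while 32-bit keeps its seqcount protection.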
drivers/net/dummy.c

@@ -51,41 +51,15 @@ static void set_multicast_list(struct net_device *dev)
 {
 }
 
-struct pcpu_dstats {
-        u64 tx_packets;
-        u64 tx_bytes;
-        struct u64_stats_sync syncp;
-};
-
 static void dummy_get_stats64(struct net_device *dev,
                               struct rtnl_link_stats64 *stats)
 {
-        int i;
-
-        for_each_possible_cpu(i) {
-                const struct pcpu_dstats *dstats;
-                u64 tbytes, tpackets;
-                unsigned int start;
-
-                dstats = per_cpu_ptr(dev->dstats, i);
-                do {
-                        start = u64_stats_fetch_begin_irq(&dstats->syncp);
-                        tbytes = dstats->tx_bytes;
-                        tpackets = dstats->tx_packets;
-                } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
-                stats->tx_bytes += tbytes;
-                stats->tx_packets += tpackets;
-        }
+        dev_lstats_read(dev, &stats->tx_packets, &stats->tx_bytes);
 }
 
 static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-        struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
-        u64_stats_update_begin(&dstats->syncp);
-        dstats->tx_packets++;
-        dstats->tx_bytes += skb->len;
-        u64_stats_update_end(&dstats->syncp);
+        dev_lstats_add(dev, skb->len);
 
         skb_tx_timestamp(skb);
         dev_kfree_skb(skb);
@@ -94,8 +68,8 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int dummy_dev_init(struct net_device *dev)
 {
-        dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
-        if (!dev->dstats)
+        dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
+        if (!dev->lstats)
                 return -ENOMEM;
 
         return 0;
@@ -103,7 +77,7 @@ static int dummy_dev_init(struct net_device *dev)
 
 static void dummy_dev_uninit(struct net_device *dev)
 {
-        free_percpu(dev->dstats);
+        free_percpu(dev->lstats);
 }
 
 static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
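dummy now allocates the generic struct pcpu_lstats through netdev_alloc_pcpu_stats(), which both allocates the per-cpu object and initializes each CPU's syncp. A hedged sketch of roughly what that macro expands to (the real definition, __netdev_alloc_pcpu_stats(), appears in the include/linux/netdevice.h hunk further down):

struct pcpu_lstats __percpu *lstats = alloc_percpu(struct pcpu_lstats);
int cpu;

if (lstats) {
        for_each_possible_cpu(cpu) {
                struct pcpu_lstats *stat = per_cpu_ptr(lstats, cpu);

                u64_stats_init(&stat->syncp);   /* seqcount init; no-op on 64-bit */
        }
}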
drivers/net/loopback.c

@@ -68,7 +68,6 @@ EXPORT_SYMBOL(blackhole_netdev);
 static netdev_tx_t loopback_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
 {
-        struct pcpu_lstats *lb_stats;
         int len;
 
         skb_tx_timestamp(skb);
@@ -85,27 +84,20 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
         skb->protocol = eth_type_trans(skb, dev);
 
-        /* it's OK to use per_cpu_ptr() because BHs are off */
-        lb_stats = this_cpu_ptr(dev->lstats);
-
         len = skb->len;
-        if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
-                u64_stats_update_begin(&lb_stats->syncp);
-                lb_stats->bytes += len;
-                lb_stats->packets++;
-                u64_stats_update_end(&lb_stats->syncp);
-        }
+        if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+                dev_lstats_add(dev, len);
 
         return NETDEV_TX_OK;
 }
 
-static void loopback_get_stats64(struct net_device *dev,
-                                 struct rtnl_link_stats64 *stats)
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes)
 {
-        u64 bytes = 0;
-        u64 packets = 0;
         int i;
 
+        *packets = 0;
+        *bytes = 0;
+
         for_each_possible_cpu(i) {
                 const struct pcpu_lstats *lb_stats;
                 u64 tbytes, tpackets;
@@ -114,12 +106,22 @@ static void loopback_get_stats64(struct net_device *dev,
 
                 lb_stats = per_cpu_ptr(dev->lstats, i);
                 do {
                         start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
-                        tbytes = lb_stats->bytes;
-                        tpackets = lb_stats->packets;
+                        tpackets = u64_stats_read(&lb_stats->packets);
+                        tbytes = u64_stats_read(&lb_stats->bytes);
                 } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
-                bytes += tbytes;
-                packets += tpackets;
+                *bytes += tbytes;
+                *packets += tpackets;
         }
+}
+EXPORT_SYMBOL(dev_lstats_read);
+
+static void loopback_get_stats64(struct net_device *dev,
+                                 struct rtnl_link_stats64 *stats)
+{
+        u64 packets, bytes;
+
+        dev_lstats_read(dev, &packets, &bytes);
+
         stats->rx_packets = packets;
         stats->tx_packets = packets;
         stats->rx_bytes   = bytes;
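The per-cpu summing loop that each of these drivers used to open-code now lives in one place, dev_lstats_read(). Hosting it in loopback.c presumably works because loopback is built in whenever networking is, and the EXPORT_SYMBOL makes it usable from modular drivers (nlmon, veth and vsockmon below). A hedged sketch of a receive-only consumer; vsockmon ends up with exactly this shape:

static void mon_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *stats)
{
        dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
}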
drivers/net/nlmon.c

@@ -9,13 +9,7 @@
 
 static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-        int len = skb->len;
-        struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
-        u64_stats_update_begin(&stats->syncp);
-        stats->bytes += len;
-        stats->packets++;
-        u64_stats_update_end(&stats->syncp);
+        dev_lstats_add(dev, skb->len);
 
         dev_kfree_skb(skb);
 
@@ -56,25 +50,9 @@ static int nlmon_close(struct net_device *dev)
 static void
 nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-        int i;
-        u64 bytes = 0, packets = 0;
+        u64 packets, bytes;
 
-        for_each_possible_cpu(i) {
-                const struct pcpu_lstats *nl_stats;
-                u64 tbytes, tpackets;
-                unsigned int start;
-
-                nl_stats = per_cpu_ptr(dev->lstats, i);
-
-                do {
-                        start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
-                        tbytes = nl_stats->bytes;
-                        tpackets = nl_stats->packets;
-                } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
-
-                packets += tpackets;
-                bytes += tbytes;
-        }
+        dev_lstats_read(dev, &packets, &bytes);
 
         stats->rx_packets = packets;
         stats->tx_packets = 0;
drivers/net/tun.c

@@ -136,10 +136,10 @@ struct tap_filter {
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
 struct tun_pcpu_stats {
-        u64 rx_packets;
-        u64 rx_bytes;
-        u64 tx_packets;
-        u64 tx_bytes;
+        u64_stats_t rx_packets;
+        u64_stats_t rx_bytes;
+        u64_stats_t tx_packets;
+        u64_stats_t tx_bytes;
         struct u64_stats_sync syncp;
         u32 rx_dropped;
         u32 tx_dropped;
@@ -1167,10 +1167,10 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                 p = per_cpu_ptr(tun->pcpu_stats, i);
                 do {
                         start = u64_stats_fetch_begin(&p->syncp);
-                        rxpackets = p->rx_packets;
-                        rxbytes = p->rx_bytes;
-                        txpackets = p->tx_packets;
-                        txbytes = p->tx_bytes;
+                        rxpackets = u64_stats_read(&p->rx_packets);
+                        rxbytes = u64_stats_read(&p->rx_bytes);
+                        txpackets = u64_stats_read(&p->tx_packets);
+                        txbytes = u64_stats_read(&p->tx_bytes);
                 } while (u64_stats_fetch_retry(&p->syncp, start));
 
                 stats->rx_packets += rxpackets;
@@ -1998,8 +1998,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
         stats = get_cpu_ptr(tun->pcpu_stats);
         u64_stats_update_begin(&stats->syncp);
-        stats->rx_packets++;
-        stats->rx_bytes += len;
+        u64_stats_inc(&stats->rx_packets);
+        u64_stats_add(&stats->rx_bytes, len);
         u64_stats_update_end(&stats->syncp);
         put_cpu_ptr(stats);
 
@@ -2052,8 +2052,8 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
 
         stats = get_cpu_ptr(tun->pcpu_stats);
         u64_stats_update_begin(&stats->syncp);
-        stats->tx_packets++;
-        stats->tx_bytes += ret;
+        u64_stats_inc(&stats->tx_packets);
+        u64_stats_add(&stats->tx_bytes, ret);
         u64_stats_update_end(&stats->syncp);
         put_cpu_ptr(tun->pcpu_stats);
 
@@ -2147,8 +2147,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
         /* caller is in process context, */
         stats = get_cpu_ptr(tun->pcpu_stats);
         u64_stats_update_begin(&stats->syncp);
-        stats->tx_packets++;
-        stats->tx_bytes += skb->len + vlan_hlen;
+        u64_stats_inc(&stats->tx_packets);
+        u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
         u64_stats_update_end(&stats->syncp);
         put_cpu_ptr(tun->pcpu_stats);
 
@@ -2511,8 +2511,8 @@ static int tun_xdp_one(struct tun_struct *tun,
          */
         stats = this_cpu_ptr(tun->pcpu_stats);
         u64_stats_update_begin(&stats->syncp);
-        stats->rx_packets++;
-        stats->rx_bytes += datasize;
+        u64_stats_inc(&stats->rx_packets);
+        u64_stats_add(&stats->rx_bytes, datasize);
         u64_stats_update_end(&stats->syncp);
 
         if (rxhash)
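tun cannot move to the shared pcpu_lstats, since tun_pcpu_stats also carries u32 drop counters, so the conversion instead retypes the four u64 counters as u64_stats_t and routes every update through u64_stats_inc()/u64_stats_add() inside the existing update_begin/update_end pairs. Note the two per-cpu access styles above, sketched with comments (assumptions about calling context are mine, inferred from the surrounding code):

/* Process-context paths such as tun_get_user() must pin the CPU around
 * the per-cpu update:
 */
stats = get_cpu_ptr(tun->pcpu_stats);           /* disables preemption */
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->rx_packets);
u64_stats_add(&stats->rx_bytes, len);
u64_stats_update_end(&stats->syncp);
put_cpu_ptr(tun->pcpu_stats);                   /* re-enables preemption */

/* tun_xdp_one() already runs with preemption disabled, so this_cpu_ptr()
 * is enough there.
 */
stats = this_cpu_ptr(tun->pcpu_stats);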
drivers/net/veth.c

@@ -260,14 +260,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 
         skb_tx_timestamp(skb);
         if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
-                if (!rcv_xdp) {
-                        struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
-                        u64_stats_update_begin(&stats->syncp);
-                        stats->bytes += length;
-                        stats->packets++;
-                        u64_stats_update_end(&stats->syncp);
-                }
+                if (!rcv_xdp)
+                        dev_lstats_add(dev, length);
         } else {
 drop:
                 atomic64_inc(&priv->dropped);
@@ -281,26 +275,11 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
 }
 
-static u64 veth_stats_tx(struct pcpu_lstats *result, struct net_device *dev)
+static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
 {
         struct veth_priv *priv = netdev_priv(dev);
-        int cpu;
 
-        result->packets = 0;
-        result->bytes = 0;
-        for_each_possible_cpu(cpu) {
-                struct pcpu_lstats *stats = per_cpu_ptr(dev->lstats, cpu);
-                u64 packets, bytes;
-                unsigned int start;
-
-                do {
-                        start = u64_stats_fetch_begin_irq(&stats->syncp);
-                        packets = stats->packets;
-                        bytes = stats->bytes;
-                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-                result->packets += packets;
-                result->bytes += bytes;
-        }
+        dev_lstats_read(dev, packets, bytes);
         return atomic64_read(&priv->dropped);
 }
 
@@ -335,11 +314,11 @@ static void veth_get_stats64(struct net_device *dev,
         struct veth_priv *priv = netdev_priv(dev);
         struct net_device *peer;
         struct veth_rq_stats rx;
-        struct pcpu_lstats tx;
+        u64 packets, bytes;
 
-        tot->tx_dropped = veth_stats_tx(&tx, dev);
-        tot->tx_bytes = tx.bytes;
-        tot->tx_packets = tx.packets;
+        tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
+        tot->tx_bytes = bytes;
+        tot->tx_packets = packets;
 
         veth_stats_rx(&rx, dev);
         tot->rx_dropped = rx.xdp_drops;
@@ -349,9 +328,9 @@ static void veth_get_stats64(struct net_device *dev,
         rcu_read_lock();
         peer = rcu_dereference(priv->peer);
         if (peer) {
-                tot->rx_dropped += veth_stats_tx(&tx, peer);
-                tot->rx_bytes += tx.bytes;
-                tot->rx_packets += tx.packets;
+                tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+                tot->rx_bytes += bytes;
+                tot->rx_packets += packets;
 
                 veth_stats_rx(&rx, peer);
                 tot->tx_bytes += rx.xdp_bytes;
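A side effect of the type change drives the veth refactor: once struct pcpu_lstats holds u64_stats_t members instead of plain u64, a stack-local pcpu_lstats no longer works as a simple accumulator. veth_stats_tx() therefore trades its struct out-parameter for two plain u64 out-parameters, and the call sites assign those straight into rtnl_link_stats64, as the hunks above show.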
drivers/net/vsockmon.c

@@ -47,13 +47,7 @@ static int vsockmon_close(struct net_device *dev)
 
 static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-        int len = skb->len;
-        struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats);
-
-        u64_stats_update_begin(&stats->syncp);
-        stats->bytes += len;
-        stats->packets++;
-        u64_stats_update_end(&stats->syncp);
+        dev_lstats_add(dev, skb->len);
 
         dev_kfree_skb(skb);
 
@@ -63,30 +57,9 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev)
 static void
 vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-        int i;
-        u64 bytes = 0, packets = 0;
+        dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
 
-        for_each_possible_cpu(i) {
-                const struct pcpu_lstats *vstats;
-                u64 tbytes, tpackets;
-                unsigned int start;
-
-                vstats = per_cpu_ptr(dev->lstats, i);
-
-                do {
-                        start = u64_stats_fetch_begin_irq(&vstats->syncp);
-                        tbytes = vstats->bytes;
-                        tpackets = vstats->packets;
-                } while (u64_stats_fetch_retry_irq(&vstats->syncp, start));
-
-                packets += tpackets;
-                bytes += tbytes;
-        }
-
-        stats->rx_packets = packets;
         stats->tx_packets = 0;
 
-        stats->rx_bytes = bytes;
         stats->tx_bytes = 0;
 }
include/linux/netdevice.h

@@ -2396,11 +2396,23 @@ struct pcpu_sw_netstats {
 } __aligned(4 * sizeof(u64));
 
 struct pcpu_lstats {
-        u64 packets;
-        u64 bytes;
+        u64_stats_t packets;
+        u64_stats_t bytes;
         struct u64_stats_sync syncp;
 } __aligned(2 * sizeof(u64));
 
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
+{
+        struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
+
+        u64_stats_update_begin(&lstats->syncp);
+        u64_stats_add(&lstats->bytes, len);
+        u64_stats_inc(&lstats->packets);
+        u64_stats_update_end(&lstats->syncp);
+}
+
 #define __netdev_alloc_pcpu_stats(type, gfp)                            \
 ({                                                                      \
         typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
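With pcpu_lstats converted and both helpers in place, a virtual driver's datapath accounting reduces to two calls. A minimal, hypothetical sketch (driver name and ops are illustrative; allocation and free work as in dummy_dev_init()/dummy_dev_uninit() above):

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_lstats_add(dev, skb->len);  /* per-cpu, tear-free accounting */

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static void foo_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *stats)
{
        dev_lstats_read(dev, &stats->tx_packets, &stats->tx_bytes);
}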
include/linux/u64_stats_sync.h

@@ -40,8 +40,8 @@
  * spin_lock_bh(...) or other synchronization to get exclusive access
  * ...
  * u64_stats_update_begin(&stats->syncp);
- * stats->bytes64 += len; // non atomic operation
- * stats->packets64++; // non atomic operation
+ * u64_stats_add(&stats->bytes64, len); // non atomic operation
+ * u64_stats_inc(&stats->packets64); // non atomic operation
  * u64_stats_update_end(&stats->syncp);
  *
  * While a consumer (reader) should use following template to get consistent
@@ -52,8 +52,8 @@
  *
  * do {
  *         start = u64_stats_fetch_begin(&stats->syncp);
- *         tbytes = stats->bytes64; // non atomic operation
- *         tpackets = stats->packets64; // non atomic operation
+ *         tbytes = u64_stats_read(&stats->bytes64); // non atomic operation
+ *         tpackets = u64_stats_read(&stats->packets64); // non atomic operation
  * } while (u64_stats_fetch_retry(&stats->syncp, start));
  *
  *
@@ -68,6 +68,49 @@ struct u64_stats_sync {
 #endif
 };
 
+#if BITS_PER_LONG == 64
+#include <asm/local64.h>
+
+typedef struct {
+        local64_t v;
+} u64_stats_t ;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+        return local64_read(&p->v);
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+        local64_add(val, &p->v);
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+        local64_inc(&p->v);
+}
+
+#else
+
+typedef struct {
+        u64 v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+        return p->v;
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+        p->v += val;
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+        p->v++;
+}
+#endif
+
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
 {
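The two u64_stats_t implementations split the work: on 64-bit, local64_t keeps every update and read a single untorn instruction (on x86-64, local64_add() compiles to an ordinary add without a LOCK prefix, which is safe because each counter is only ever written from the CPU that owns it), and the seqcount side of u64_stats_sync vanishes; on 32-bit the fields stay plain u64, and the seqcount makes readers that overlap an update retry. A hedged sketch of a generic counter built on this API (illustrative names, not part of the patch):

struct my_counter {
        u64_stats_t val;
        struct u64_stats_sync syncp;
};

static inline void my_counter_add(struct my_counter *c, unsigned long n)
{
        u64_stats_update_begin(&c->syncp);
        u64_stats_add(&c->val, n);
        u64_stats_update_end(&c->syncp);
}

static inline u64 my_counter_read(const struct my_counter *c)
{
        unsigned int start;
        u64 val;

        do {
                start = u64_stats_fetch_begin(&c->syncp);
                val = u64_stats_read(&c->val);
        } while (u64_stats_fetch_retry(&c->syncp, start));

        return val;
}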