mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-27 10:15:10 +07:00
827da44c61
In order to enable lockdep on seqcount/seqlock structures, we must explicitly initialize any locks. The u64_stats_sync structure, uses a seqcount, and thus we need to introduce a u64_stats_init() function and use it to initialize the structure. This unfortunately adds a lot of fairly trivial initialization code to a number of drivers. But the benefit of ensuring correctness makes this worth while. Because these changes are required for lockdep to be enabled, and the changes are quite trivial, I've not yet split this patch out into 30-some separate patches, as I figured it would be better to get the various maintainers thoughts on how to best merge this change along with the seqcount lockdep enablement. Feedback would be appreciated! Signed-off-by: John Stultz <john.stultz@linaro.org> Acked-by: Julian Anastasov <ja@ssi.bg> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> Cc: "David S. Miller" <davem@davemloft.net> Cc: Eric Dumazet <eric.dumazet@gmail.com> Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org> Cc: James Morris <jmorris@namei.org> Cc: Jesse Gross <jesse@nicira.com> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: "Michael S. Tsirkin" <mst@redhat.com> Cc: Mirko Lindner <mlindner@marvell.com> Cc: Patrick McHardy <kaber@trash.net> Cc: Roger Luethi <rl@hellgate.ch> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Simon Horman <horms@verge.net.au> Cc: Stephen Hemminger <stephen@networkplumber.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> Cc: Wensong Zhang <wensong@linux-vs.org> Cc: netdev@vger.kernel.org Link: http://lkml.kernel.org/r/1381186321-4906-2-git-send-email-john.stultz@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
148 lines
4.3 KiB
C
148 lines
4.3 KiB
C
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H
/*
 * To properly implement 64bits network statistics on 32bit and 64bit hosts,
 * we provide a synchronization point, that is a noop on 64bit or UP kernels.
 *
 * Key points :
 * 1) Use a seqcount on SMP 32bits, with low overhead.
 * 2) Whole thing is a noop on 64bit arches or UP kernels.
 * 3) Write side must ensure mutual exclusion or one seqcount update could
 *    be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock or
 *    spinlock_bh() or disable_bh() :
 *    3.1) Write side should not sleep.
 *    3.2) Write side should not allow preemption.
 *    3.3) If applicable, interrupts should be disabled.
 *
 * 4) If reader fetches several counters, there is no guarantee the whole values
 *    are consistent (remember point 1) : this is a noop on 64bit arches anyway)
 *
 * 5) readers are allowed to sleep or be preempted/interrupted : They perform
 *    pure reads. But if they have to fetch many values, it's better to not allow
 *    preemptions/interruptions to avoid many retries.
 *
 * 6) If counter might be written by an interrupt, readers should block interrupts.
 *    (On UP, there is no seqcount_t protection, a reader allowing interrupts could
 *    read partial values)
 *
 * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
 *    u64_stats_fetch_retry_bh() helpers
 *
 * Usage :
 *
 * Stats producer (writer) should use following template granted it already got
 * an exclusive access to counters (a lock is already taken, or per cpu
 * data is used [in a non preemptable context])
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   stats->bytes64 += len; // non atomic operation
 *   stats->packets64++;    // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * While a consumer (reader) should use following template to get consistent
 * snapshot for each variable (but no guarantee on several ones)
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = stats->bytes64;     // non atomic operation
 *         tpackets = stats->packets64; // non atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * Example of use in drivers/net/loopback.c, using per_cpu containers,
 * in BH disabled context.
 */
#include <linux/seqlock.h>
struct u64_stats_sync {
|
|
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
|
|
seqcount_t seq;
|
|
#endif
|
|
};
|
|
|
|
|
|
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
|
|
# define u64_stats_init(syncp) seqcount_init(syncp.seq)
|
|
#else
|
|
# define u64_stats_init(syncp) do { } while (0)
|
|
#endif
|
|
|
|
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
|
|
{
|
|
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
|
|
write_seqcount_begin(&syncp->seq);
|
|
#endif
|
|
}
|
|
|
|
static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
|
|
{
|
|
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
|
|
write_seqcount_end(&syncp->seq);
|
|
#endif
|
|
}
|
|
|
|
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
|
|
{
|
|
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
|
|
return read_seqcount_begin(&syncp->seq);
|
|
#else
|
|
#if BITS_PER_LONG==32
|
|
preempt_disable();
|
|
#endif
|
|
return 0;
|
|
#endif
|
|
}
|
|
|
|
static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
|
|
unsigned int start)
|
|
{
|
|
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
|
|
return read_seqcount_retry(&syncp->seq, start);
|
|
#else
|
|
#if BITS_PER_LONG==32
|
|
preempt_enable();
|
|
#endif
|
|
return false;
|
|
#endif
|
|
}
|
|
|
|
/*
 * In case softirq handlers can update u64 counters, readers can use following helpers
 * - SMP 32bit arches use seqcount protection, irq safe.
 * - UP 32bit must disable BH.
 * - 64bit have no problem atomically reading u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
|
|
{
|
|
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
|
|
return read_seqcount_begin(&syncp->seq);
|
|
#else
|
|
#if BITS_PER_LONG==32
|
|
local_bh_disable();
|
|
#endif
|
|
return 0;
|
|
#endif
|
|
}
|
|
|
|
static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
|
|
unsigned int start)
|
|
{
|
|
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
|
|
return read_seqcount_retry(&syncp->seq, start);
|
|
#else
|
|
#if BITS_PER_LONG==32
|
|
local_bh_enable();
|
|
#endif
|
|
return false;
|
|
#endif
|
|
}
|
|
|
|
#endif /* _LINUX_U64_STATS_SYNC_H */