mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-16 06:06:44 +07:00
e192832869
Pull locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - rwsem scalability improvements, phase #2, by Waiman Long, which are
     rather impressive:

       "On a 2-socket 40-core 80-thread Skylake system with 40
        reader and writer locking threads, the min/mean/max locking
        operations done in a 5-second testing window before the
        patchset were:

         40 readers, Iterations Min/Mean/Max = 1,807/1,808/1,810
         40 writers, Iterations Min/Mean/Max = 1,807/50,344/151,255

        After the patchset, they became:

         40 readers, Iterations Min/Mean/Max = 30,057/31,359/32,741
         40 writers, Iterations Min/Mean/Max = 94,466/95,845/97,098"

     There's a lot of changes to the locking implementation that make it
     similar to qrwlock, including owner handoff for more fair locking.

     Another microbenchmark shows how the improvements hold across the
     spectrum:

       "With a locking microbenchmark running on a 5.1 based kernel,
        the total locking rates (in kops/s) on a 2-socket Skylake
        system with equal numbers of readers and writers (mixed)
        before and after this patchset were:

        # of Threads   Before Patch   After Patch
        ------------   ------------   -----------
             2            2,618          4,193
             4            1,202          3,726
             8              802          3,622
            16              729          3,359
            32              319          2,826
            64              102          2,744"

     The changes are extensive and the patchset has been through
     several iterations addressing various locking workloads. There
     might be more regressions, but unless they are pathological I
     believe we want to use this new implementation as the baseline
     going forward.

   - jump-label optimizations by Daniel Bristot de Oliveira: the primary
     motivation was to remove IPI disturbance of isolated RT-workload
     CPUs, which resulted in the implementation of batched jump-label
     updates. Beyond the improvement of the real-time characteristics of
     the kernel, in one test this patchset improved static key update
     overhead from 57 msecs to just 1.4 msecs - which is a nice speedup
     as well.

   - atomic64_t cross-arch type cleanups by Mark Rutland: over the last
     ~10 years of atomic64_t existence the various types used by the
     APIs only had to be self-consistent within each architecture -
     which means they became wildly inconsistent across architectures.
     Mark puts an end to this by reworking all the atomic64
     implementations to use 's64' as the base type for atomic64_t, and
     by ensuring that this type is consistently used for parameters and
     return values in the API, avoiding further problems in this area.

   - A large set of small improvements to lockdep by Yuyang Du: type
     cleanups, output cleanups, function return type and other cleanups
     all around the place.

   - A set of percpu ops cleanups and fixes by Peter Zijlstra.

   - Misc other changes - please see the Git log for more details"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (82 commits)
  locking/lockdep: increase size of counters for lockdep statistics
  locking/atomics: Use sed(1) instead of non-standard head(1) option
  locking/lockdep: Move mark_lock() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING
  x86/jump_label: Make tp_vec_nr static
  x86/percpu: Optimize raw_cpu_xchg()
  x86/percpu, sched/fair: Avoid local_clock()
  x86/percpu, x86/irq: Relax {set,get}_irq_regs()
  x86/percpu: Relax smp_processor_id()
  x86/percpu: Differentiate this_cpu_{}() and __this_cpu_{}()
  locking/rwsem: Guard against making count negative
  locking/rwsem: Adaptive disabling of reader optimistic spinning
  locking/rwsem: Enable time-based spinning on reader-owned rwsem
  locking/rwsem: Make rwsem->owner an atomic_long_t
  locking/rwsem: Enable readers spinning on writer
  locking/rwsem: Clarify usage of owner's nonspinaable bit
  locking/rwsem: Wake up almost all readers in wait queue
  locking/rwsem: More optimal RT task handling of null owner
  locking/rwsem: Always release wait_lock before waking up tasks
  locking/rwsem: Implement lock handoff to prevent lock starvation
  locking/rwsem: Make rwsem_spin_on_owner() return owner state
  ...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/llist.h>
typedef void (*smp_call_func_t)(void *info);

struct __call_single_data {
	struct llist_node llist;
	smp_call_func_t func;
	void *info;
	unsigned int flags;
};
/* Use __aligned() to avoid using two cache lines for one csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

/* total number of CPUs in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
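/*
 * Illustrative sketch, not part of the upstream header: a hypothetical
 * helper showing a typical smp_call_function_single() call. The helper
 * name is made up for the example.
 */
static inline int example_run_on_cpu(int cpu, smp_call_func_t func, void *info)
{
	/* Run func(info) on @cpu and block until it has completed. */
	return smp_call_function_single(cpu, func, info, 1);
}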
/*
 * Call a function on all processors
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait);
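/*
 * Illustrative sketch (hypothetical helper, not in the upstream header):
 * running a callback on every CPU in a caller-chosen mask, which may or
 * may not include the CPU issuing the call.
 */
static inline void example_run_on_mask(const struct cpumask *mask,
				       smp_call_func_t func, void *info)
{
	/* Execute func(info) on each CPU in @mask and wait for all of them. */
	on_each_cpu_mask(mask, func, info, true);
}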
/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags);
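/*
 * Illustrative sketch (hypothetical, not part of the header): pairing a
 * predicate with a callback so the callback only runs on CPUs the
 * predicate selects, e.g. every even-numbered CPU. GFP_KERNEL assumes
 * linux/gfp.h is available; it is used for the internal cpumask
 * allocation, if one is needed.
 */
static inline bool example_is_even_cpu(int cpu, void *info)
{
	return (cpu % 2) == 0;	/* select even-numbered CPUs only */
}

static inline void example_run_on_even_cpus(smp_call_func_t func, void *info)
{
	on_each_cpu_cond(example_is_even_cpu, func, info, true, GFP_KERNEL);
}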
void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
			   smp_call_func_t func, void *info, bool wait,
			   gfp_t gfp_flags, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, call_single_data_t *csd);
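/*
 * Illustrative sketch (hypothetical): an asynchronous cross-CPU call.
 * The caller owns the call_single_data_t and must keep it alive until
 * the callback has run; unlike the _single() variant, nothing waits.
 */
static call_single_data_t example_csd;

static inline int example_kick_cpu(int cpu, smp_call_func_t func, void *info)
{
	example_csd.func = func;
	example_csd.info = info;
	/* Returns -EBUSY if example_csd is still in flight from a prior call. */
	return smp_call_function_single_async(cpu, &example_csd);
}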
#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);
/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
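/*
 * Illustrative sketch (hypothetical helper): smp_call_function_any()
 * picks one CPU from @mask, preferring the current CPU so that an IPI
 * can often be avoided entirely.
 */
static inline int example_run_on_any(const struct cpumask *mask,
				     smp_call_func_t func, void *info)
{
	/* Run func(info) on some CPU in @mask and wait for it to finish. */
	return smp_call_function_any(mask, func, info, 1);
}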
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt
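/*
 * Illustrative sketch (hypothetical arch code, not part of this header):
 * an architecture's call-function IPI entry point typically just
 * forwards to the generic handler declared above.
 */
static inline void example_arch_call_function_ipi(void)
{
	generic_smp_call_function_interrupt();	/* drain pending csd entries */
}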
/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}
#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */
/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */
/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */
/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
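/*
 * Illustrative sketch (hypothetical helper): get_cpu()/put_cpu() pin the
 * task to the current CPU by disabling preemption, which is one of the
 * conditions above under which the CPU id stays stable.
 */
static inline int example_stable_cpu_id(void)
{
	int cpu = get_cpu();	/* preemption off: cpu cannot change */

	/* ... per-CPU work that relies on staying on this CPU ... */

	put_cpu();		/* preemption back on */
	return cpu;
}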
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
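/*
 * Illustrative sketch (hypothetical helper): unlike the IPI-based calls
 * above, smp_call_on_cpu() runs @func from a worker on the target CPU,
 * so @func may sleep and its int return value is passed back to the
 * caller.
 */
static inline int example_read_remote(unsigned int cpu, int (*getter)(void *),
				      void *arg)
{
	/* phys == false: no need to pin to the physical CPU. */
	return smp_call_on_cpu(cpu, getter, arg, false);
}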
/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */