mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
43224b96af
Pull timer updates from Thomas Gleixner: "A rather largish update for everything time and timer related: - Cache footprint optimizations for both hrtimers and timer wheel - Lower the NOHZ impact on systems which have NOHZ or timer migration disabled at runtime. - Optimize run time overhead of hrtimer interrupt by making the clock offset updates smarter - hrtimer cleanups and removal of restrictions to tackle some problems in sched/perf - Some more leap second tweaks - Another round of changes addressing the 2038 problem - First step to change the internals of clock event devices by introducing the necessary infrastructure - Allow constant folding for usecs/msecs_to_jiffies() - The usual pile of clockevent/clocksource driver updates The hrtimer changes contain updates to sched, perf and x86 as they depend on them plus changes all over the tree to cleanup API changes and redundant code, which got copied all over the place. The y2038 changes touch s390 to remove the last non-2038-safe code related to boot/persistent clock" * 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits) clocksource: Increase dependencies of timer-stm32 to limit build wreckage timer: Minimize nohz off overhead timer: Reduce timer migration overhead if disabled timer: Stats: Simplify the flags handling timer: Replace timer base by a cpu index timer: Use hlist for the timer wheel hash buckets timer: Remove FIFO "guarantee" timers: Sanitize catchup_timer_jiffies() usage hrtimer: Allow hrtimer::function() to free the timer seqcount: Introduce raw_write_seqcount_barrier() seqcount: Rename write_seqcount_barrier() hrtimer: Fix hrtimer_is_queued() hole hrtimer: Remove HRTIMER_STATE_MIGRATE selftest: Timers: Avoid signal deadlock in leap-a-day timekeeping: Copy the shadow-timekeeper over the real timekeeper last clockevents: Check state instead of mode in suspend/resume path selftests: timers: Add leap-second timer edge testing to leap-a-day.c ntp: Do leapsecond adjustment in adjtimex read path time: Prevent early expiry of hrtimers[CLOCK_REALTIME] at the leap second edge ntp: Introduce and use SECS_PER_DAY macro instead of 86400 ...
109 lines
3.6 KiB
C
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

/* Inform RCU of a context switch on the current CPU. */
void rcu_note_context_switch(void);
/*
 * Report whether RCU still needs this CPU, for use by the NOHZ idle
 * code.  NOTE(review): the meaning of basem/nextevt (monotonic base
 * time in/next event out) is inferred from the timer-NOHZ naming --
 * confirm against the definition in kernel/rcu/tree.c.
 */
int rcu_needs_cpu(u64 basem, u64 *nextevt);
/* Reset RCU CPU stall-warning bookkeeping. */
void rcu_cpu_stall_reset(void);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.  The cpu argument is accepted for interface
 * compatibility with callers (and with the TINY_RCU variant) but is
 * not used by this tree-RCU implementation.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
	rcu_note_context_switch();
}

/* Grace-period waits: normal RCU-bh, and expedited variants. */
void synchronize_rcu_bh(void);
void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);

/*
 * Queue an RCU callback whose purpose is to free the enclosing
 * structure (kfree()-style).  NOTE(review): any batching/laziness
 * semantics are in the implementation, not visible here.
 */
void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));

/**
 * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
 *
 * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_rcu_bh_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_rcu_bh() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restriction will result in deadlock.
 */
static inline void synchronize_rcu_bh_expedited(void)
{
	/* An expedited sched grace period suffices for RCU-bh here. */
	synchronize_sched_expedited();
}

/* Wait for all in-flight callbacks of the respective RCU flavor. */
void rcu_barrier(void);
void rcu_barrier_bh(void);
void rcu_barrier_sched(void);

/*
 * Polled grace-period interface: take a snapshot cookie, then later
 * wait only if a grace period has not already elapsed since that
 * snapshot.
 */
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);

/* Counters and accessors exported for rcutorture and diagnostics. */
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
void show_rcu_gp_kthreads(void);

/* Force quiescent states for the respective RCU flavor. */
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);

/* Hooks so RCU can track idle and interrupt entry/exit on a CPU. */
void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);

/* Release any RCU state held by an exiting task. */
void exit_rcu(void);

/* Called during boot once the scheduler is running. */
void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;

/* True if RCU is currently paying attention to this CPU. */
bool rcu_is_watching(void);

/* Report a quiescent state for all RCU flavors on this CPU. */
void rcu_all_qs(void);

#endif /* __LINUX_RCUTREE_H */