mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-16 16:57:04 +07:00
54b6680090
Add a native implementation for the sched_clock() function which utilizes the processor-internal cycle counter (Control Register 16) as a high-resolution time source. With this patch we now get much finer-grained resolution in various in-kernel time measurements (e.g. when viewing the function tracing logs), and probably more accurate scheduling on SMP systems.

There are a few specific implementation details in this patch:

1. On a 32-bit kernel we emulate the upper 32 bits of the required 64-bit resolution of sched_clock() by incrementing a per-cpu counter at every wrap-around of the 32-bit cycle counter (see the sketch after this message).

2. In an SMP system, the cycle counters of the various CPUs are not synchronized (similar to the TSC in an x86_64 system). To cope with this we define HAVE_UNSTABLE_SCHED_CLOCK and let the upper layers do the adjustment work.

3. Since we need HAVE_UNSTABLE_SCHED_CLOCK, we need to provide a cmpxchg64() function even on a 32-bit kernel.

4. A 64-bit SMP kernel which is started on a UP system will mark the sched_clock() implementation as "stable", which means that we don't expect any jumps in the returned counter. This is true because we then run on only one CPU.

Signed-off-by: Helge Deller <deller@gmx.de>
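To make the wrap-around handling in point 1 concrete, here is a minimal sketch (not the actual patch) of a 32-bit sched_clock() that widens CR16 with a per-cpu high word. The names cr16_high and cr16_last are hypothetical, the header locations are approximate, and the scaling from cycles to nanoseconds as well as preemption handling are omitted.

#include <linux/percpu.h>
#include <linux/sched.h>		/* sched_clock() prototype */
#include <asm/processor.h>		/* mfctl(); assumed location, may vary by version */

static DEFINE_PER_CPU(u32, cr16_high);	/* emulated upper 32 bits */
static DEFINE_PER_CPU(u32, cr16_last);	/* last CR16 value seen on this CPU */

unsigned long long sched_clock(void)
{
	u32 now = mfctl(16);			/* read the 32-bit cycle counter */
	u32 *last = this_cpu_ptr(&cr16_last);
	u32 *high = this_cpu_ptr(&cr16_high);

	if (now < *last)			/* counter wrapped since last call */
		(*high)++;
	*last = now;

	/* Real code must still convert cycles to nanoseconds. */
	return ((u64)*high << 32) | now;
}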
81 lines
1.7 KiB
C
/*
 * bitops.c: atomic operations which got too long to be inlined all over
 *      the place.
 *
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

#ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
	[0 ... (ATOMIC_HASH_SIZE-1)]  = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif

#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif

unsigned long __xchg32(int x, int *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}


unsigned long __xchg8(char x, char *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}


u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
{
	unsigned long flags;
	u64 prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}

unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}
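Point 3 of the commit message says a cmpxchg64() must exist even on a 32-bit kernel, and __cmpxchg_u64() above is the spinlock-protected backend for it. The following is an illustrative sketch, not the actual arch/parisc header: the macro shape, update_timestamp() and last_ns are made up for the example.

/*
 * Illustrative only: one way a 32-bit kernel could route the generic
 * cmpxchg64() API to the hashed-spinlock helper defined above.
 */
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new);

#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg_u64((volatile u64 *)(ptr),	\
					   (u64)(o), (u64)(n)))

/* Example caller (hypothetical): atomically publish a 64-bit timestamp. */
static u64 last_ns;

static void update_timestamp(u64 now_ns)
{
	u64 old = last_ns;

	/* Only move the timestamp forward; retry if another CPU raced us. */
	while (old < now_ns) {
		u64 seen = cmpxchg64(&last_ns, old, now_ns);
		if (seen == old)
			break;
		old = seen;
	}
}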