commit 0195c00244

Merge tag 'split-asm_system_h-for-linus-20120328' of
git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system

Pull "Disintegrate and delete asm/system.h" from David Howells:
 "Here are a bunch of patches to disintegrate asm/system.h into a set of
  separate bits to relieve the problem of circular inclusion dependencies.
  I've built all the working defconfigs from all the arches that I can and
  made sure that they don't break.

  The reason for these patches is that I recently encountered a circular
  dependency problem that came about when I produced some patches to
  optimise get_order() by rewriting it to use ilog2().  This uses bitops,
  and on the SH arch asm/bitops.h drags in asm-generic/get_order.h by a
  circuitous route involving asm/system.h.

  The main difficulty seems to be asm/system.h.  It holds a number of low
  level bits with no/few dependencies that are commonly used (eg. memory
  barriers) and a number of bits with more dependencies that aren't used
  in many places (eg. switch_to()).

  These patches break asm/system.h up into the following core pieces:

    (1) asm/barrier.h

	Move memory barriers here.  This is already done for MIPS and Alpha.

    (2) asm/switch_to.h

	Move switch_to() and related stuff here.

    (3) asm/exec.h

	Move arch_align_stack() here.  Other process execution related bits
	could perhaps go here from asm/processor.h.

    (4) asm/cmpxchg.h

	Move xchg() and cmpxchg() here as they're full word atomic ops and
	frequently used by atomic_xchg() and atomic_cmpxchg().

    (5) asm/bug.h

	Move die() and related bits.

    (6) asm/auxvec.h

	Move AT_VECTOR_SIZE_ARCH here.

  Other arch headers are created as needed on a per-arch basis."

Fixed up some conflicts from other header file cleanups and moving code
around that has happened in the meantime, so David's testing is somewhat
weakened by that.  We'll find out anything that got broken and fix it..

* tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits)
  Delete all instances of asm/system.h
  Remove all #inclusions of asm/system.h
  Add #includes needed to permit the removal of asm/system.h
  Move all declarations of free_initmem() to linux/mm.h
  Disintegrate asm/system.h for OpenRISC
  Split arch_align_stack() out from asm-generic/system.h
  Split the switch_to() wrapper out of asm-generic/system.h
  Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h
  Create asm-generic/barrier.h
  Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h
  Disintegrate asm/system.h for Xtensa
  Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt]
  Disintegrate asm/system.h for Tile
  Disintegrate asm/system.h for Sparc
  Disintegrate asm/system.h for SH
  Disintegrate asm/system.h for Score
  Disintegrate asm/system.h for S390
  Disintegrate asm/system.h for PowerPC
  Disintegrate asm/system.h for PA-RISC
  Disintegrate asm/system.h for MN10300
  ...
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
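/*
 * Usage sketch (hypothetical caller code; `my_lock' is an assumed name):
 * client code only ever includes this header, never the asm/ or
 * spinlock_api_* pieces directly:
 *
 *	#include <linux/spinlock.h>
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	...critical section, must not sleep...
 *	spin_unlock(&my_lock);
 */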
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))
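/*
 * Illustrative sketch (assumed arch-level code, not part of this header):
 * an arch's inline-asm lock slowpath can be moved out of line into the
 * lock subsection so the uncontended fast path stays hot in icache:
 *
 *	asm volatile(
 *		"1:	...fast path, branch to 2f on contention...\n"
 *		LOCK_SECTION_START("")
 *		"2:	...spin, then branch back to 1b...\n"
 *		LOCK_SECTION_END
 *		: ...operands...);
 */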
/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
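/*
 * Initialization sketch (hypothetical `my_dev' structure): a lock embedded
 * in a dynamically allocated object must be initialized before first use,
 * and under CONFIG_DEBUG_SPINLOCK this also registers a lockdep class:
 *
 *	struct my_dev {
 *		raw_spinlock_t	lock;
 *	};
 *
 *	raw_spin_lock_init(&dev->lock);
 *
 * Statically allocated locks can use DEFINE_RAW_SPINLOCK() instead.
 */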
#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif
/* Taking the lock does not imply a full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
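/*
 * Ordering sketch (hypothetical `my_lock'): where a caller needs lock
 * acquisition to act as a full barrier, it follows the lock with this hook;
 * on arches that define ARCH_HAS_SMP_MB_AFTER_LOCK it can be a no-op:
 *
 *	spin_lock(&my_lock);
 *	smp_mb__after_lock();
 *	...accesses that must be ordered after other CPUs' prior stores...
 */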
/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif
/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set.  The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
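/*
 * Lockdep annotation sketch (hypothetical locks of the same lock class):
 * when two locks of one class are legitimately nested, the inner
 * acquisition is annotated so lockdep does not report a false
 * self-deadlock; without CONFIG_DEBUG_LOCK_ALLOC this costs nothing:
 *
 *	raw_spin_lock(&busiest->lock);
 *	raw_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 */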
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif
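/*
 * IRQ-safe locking sketch (hypothetical `my_rlock'): `flags' must be a
 * plain unsigned long; the typecheck() above turns a wrongly typed
 * argument into a build error rather than silently corrupted IRQ state:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_rlock, flags);
 *	...critical section, safe against local interrupts...
 *	raw_spin_unlock_irqrestore(&my_rlock, flags);
 */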
#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
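/*
 * Trylock sketch (hypothetical `my_rlock'): the trylock variants return
 * nonzero on success and restore IRQ state themselves on failure, so the
 * unlock must only run on the success path:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_rlock, flags)) {
 *		...critical section...
 *		raw_spin_unlock_irqrestore(&my_rlock, flags);
 *	}
 */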
/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>
/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
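/*
 * Type-safety sketch (hypothetical misuse): spinlock_check() has no runtime
 * effect; it only makes the compiler verify that the macro argument really
 * is a spinlock_t rather than, say, a raw_spinlock_t:
 *
 *	spinlock_t lock;
 *	raw_spinlock_t rlock;
 *
 *	spin_lock_init(&lock);		// ok
 *	spin_lock_init(&rlock);		// fails to build: wrong pointer type
 */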
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)
static inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
static inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}
#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}
static inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
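/*
 * Locking-precondition sketch (hypothetical helper): a function that
 * requires its caller to already hold a lock commonly documents and
 * enforces that with an assert at the top:
 *
 *	static void my_update_state(struct my_dev *dev)
 *	{
 *		assert_spin_locked(&dev->lock);
 *		...modify state protected by dev->lock...
 *	}
 */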
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
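/*
 * Refcounting sketch (hypothetical `obj' on a list protected by
 * `obj_list_lock'): the classic use is dropping the last reference and
 * unlinking the object atomically with respect to concurrent lookups,
 * without taking the lock on every non-final put:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */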
#endif /* __LINUX_SPINLOCK_H */