#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
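/*
 * Illustrative sketch only (not the in-tree implementation, which lives in
 * lib/lockref.c): when USE_CMPXCHG_LOCKREF is true, the common get/put paths
 * can avoid taking the spinlock by retrying a 64-bit cmpxchg over the
 * combined lock+count word, as long as the lock is observed to be unlocked.
 * Roughly, an increment fast path could look like this ('lockref' is the
 * argument; the helpers used are the generic spinlock/cmpxchg APIs):
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64_relaxed(&lockref->lock_count, old.lock_count,
 *				      new.lock_count) == old.lock_count)
 *			return;			// updated without locking
 *		cpu_relax();
 *		old.lock_count = READ_ONCE(lockref->lock_count);
 *	}
 *	spin_lock(&lockref->lock);		// slow path: take the lock
 *	lockref->count++;
 *	spin_unlock(&lockref->lock);
 */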
struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
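/*
 * A lockref is normally embedded in a larger object and its two fields are
 * initialized by hand; a minimal sketch ('ref' is illustrative, not part of
 * this API):
 *
 *	spin_lock_init(&ref->lock);
 *	ref->count = 1;
 */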
extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
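/*
 * Typical lookup/release pattern, sketched with an illustrative 'obj' that
 * embeds a 'struct lockref ref' ('obj' and 'free_obj' are not part of this
 * API). A lookup takes a reference only while the object is live; a release
 * either drops a non-final reference locklessly or falls back to holding the
 * lock for the final teardown:
 *
 *	if (!lockref_get_not_dead(&obj->ref))
 *		return NULL;			// object already marked dead
 *	...
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;				// dropped a non-final reference
 *	// count was <= 1: the spinlock is now held, count left untouched
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 *	free_obj(obj);				// illustrative teardown
 */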
/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}

#endif /* __LINUX_LOCKREF_H */