mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-07 02:56:45 +07:00
a436ed9c51
i386: Rearrange the cmpxchg code to allow atomic.h to get it without needing to include system.h. This kills warnings in the UML build from atomic.h about implicit declarations of cmpxchg symbols. The i386 build presumably isn't seeing this because a separate inclusion of system.h is covering it over. The cmpxchg stuff is moved to asm-i386/cmpxchg.h, with an include left in system.h for the benefit of generic code which expects cmpxchg there. Meanwhile, atomic.h includes cmpxchg.h. This causes no noticable damage to the i386 build. x86_64: Move cmpxchg into its own header. atomic.h already included system.h, so this is changed to include cmpxchg.h. This is purely cleanup - it's not fixing any warnings - so if the x86_64 system.h isn't considered as cleanup-worthy as i386, then this can be dropped. It causes no noticable damage to the x86_64 build. uml: The i386 and x86_64 cmpxchg patches require an asm-um/cmpxchg.h for the UML build. Signed-off-by: Jeff Dike <jdike@linux.intel.com> Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> Cc: Andi Kleen <ak@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
467 lines
10 KiB
C
467 lines
10 KiB
C
#ifndef __ARCH_X86_64_ATOMIC__
|
|
#define __ARCH_X86_64_ATOMIC__
|
|
|
|
#include <asm/alternative.h>
|
|
#include <asm/cmpxchg.h>
|
|
|
|
/* atomic_t should be 32 bit signed type */

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

/*
 * LOCK expands to the x86 "lock" instruction prefix on SMP kernels so
 * read-modify-write instructions become atomic across CPUs; on UP it is
 * empty.  NOTE(review): the functions below use LOCK_PREFIX (from
 * <asm/alternative.h>) rather than this macro — presumably LOCK is kept
 * for other includers of this header; confirm it still has users.
 */
#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }
|
|
|
|
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 * No lock prefix: relies on aligned 32-bit loads being atomic on x86.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * Likewise a plain aligned store — atomic without a lock prefix.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
|
|
|
|
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		/* counter is listed as both "=m" output and "m" input so gcc
		 * knows this is a read-modify-write of that exact location. */
		LOCK_PREFIX "addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
|
|
|
|
/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		/* Same dual output/input listing of counter as atomic_add. */
		LOCK_PREFIX "subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
|
|
|
|
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sete stores 1 in c iff the subl left ZF set (result == 0). */
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
|
|
|
|
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
|
|
|
|
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
|
|
|
|
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sete: c = 1 iff the decremented value is zero. */
		LOCK_PREFIX "decl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
|
|
|
|
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sete: c = 1 iff the incremented value wrapped to zero. */
		LOCK_PREFIX "incl %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
|
|
|
|
/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sets: c = 1 iff the addl left the sign flag set. */
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
|
|
|
|
/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i = i;
	__asm__ __volatile__(
		/* xaddl atomically adds i into the counter and leaves the
		 * counter's OLD value in i ("+r"); old + __i is the new value. */
		LOCK_PREFIX "xaddl %0, %1"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	return i + __i;
}
|
|
|
|
/* Subtraction is addition of the negated operand; reuses the xadd path. */
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i,v);
}

/* Increment/decrement variants that return the new value. */
#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))
|
|
|
|
/* An 64bit atomic type */

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
#define atomic64_read(v)	((v)->counter)

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 * Plain aligned 64-bit store; no lock prefix needed.
 */
#define atomic64_set(v,i)	(((v)->counter) = (i))
|
|
|
|
/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t *v)
{
	__asm__ __volatile__(
		/* 64-bit (addq) version of atomic_add; counter is listed as
		 * both output and input to mark the read-modify-write. */
		LOCK_PREFIX "addq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
|
|
|
|
/**
 * atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subq %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
|
|
|
|
/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sete: c = 1 iff the subq result is zero. */
		LOCK_PREFIX "subq %2,%0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
|
|
|
|
/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic64_inc(atomic64_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
|
|
|
|
/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic64_dec(atomic64_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decq %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
|
|
|
|
/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic64_dec_and_test(atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sete: c = 1 iff the decremented value is zero. */
		LOCK_PREFIX "decq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
|
|
|
|
/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic64_inc_and_test(atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sete: c = 1 iff the incremented value wrapped to zero. */
		LOCK_PREFIX "incq %0; sete %1"
		:"=m" (v->counter), "=qm" (c)
		:"m" (v->counter) : "memory");
	return c != 0;
}
|
|
|
|
/**
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		/* sets: c = 1 iff the addq left the sign flag set. */
		LOCK_PREFIX "addq %2,%0; sets %1"
		:"=m" (v->counter), "=qm" (c)
		:"ir" (i), "m" (v->counter) : "memory");
	return c;
}
|
|
|
|
/**
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
	long __i = i;
	__asm__ __volatile__(
		/* xaddq atomically adds i into the counter and returns the
		 * counter's OLD value in i; old + __i is the new value. */
		LOCK_PREFIX "xaddq %0, %1;"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	return i + __i;
}
|
|
|
|
/* Subtraction is addition of the negated operand; reuses the xaddq path. */
static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
{
	return atomic64_add_return(-i,v);
}

/* Increment/decrement variants that return the new value. */
#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))

/* Compare-and-swap / swap the bare counter word via the primitives from
 * <asm/cmpxchg.h>. */
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
|
|
|
/**
|
|
* atomic_add_unless - add unless the number is a given value
|
|
* @v: pointer of type atomic_t
|
|
* @a: the amount to add to v...
|
|
* @u: ...unless v is equal to u.
|
|
*
|
|
* Atomically adds @a to @v, so long as it was not @u.
|
|
* Returns non-zero if @v was not @u, and zero otherwise.
|
|
*/
|
|
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
|
|
{
|
|
int c, old;
|
|
c = atomic_read(v);
|
|
for (;;) {
|
|
if (unlikely(c == (u)))
|
|
break;
|
|
old = atomic_cmpxchg((v), c, c + (a));
|
|
if (likely(old == c))
|
|
break;
|
|
c = old;
|
|
}
|
|
return c != (u);
|
|
}
|
|
|
|
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
|
|
|
|
/**
|
|
* atomic64_add_unless - add unless the number is a given value
|
|
* @v: pointer of type atomic64_t
|
|
* @a: the amount to add to v...
|
|
* @u: ...unless v is equal to u.
|
|
*
|
|
* Atomically adds @a to @v, so long as it was not @u.
|
|
* Returns non-zero if @v was not @u, and zero otherwise.
|
|
*/
|
|
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
{
|
|
long c, old;
|
|
c = atomic64_read(v);
|
|
for (;;) {
|
|
if (unlikely(c == (u)))
|
|
break;
|
|
old = atomic64_cmpxchg((v), c, c + (a));
|
|
if (likely(old == c))
|
|
break;
|
|
c = old;
|
|
}
|
|
return c != (u);
|
|
}
|
|
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
|
|
/* These are x86-specific, used by some header files */

/*
 * atomic_clear_mask - atomically clear the bits in @mask in *@addr.
 * Fix: @addr is parenthesized before dereferencing and @mask before
 * casting, so expression arguments (e.g. `p + 1`, `a | b`) expand
 * correctly; the old `*addr` / `(unsigned)mask` forms bound too tightly.
 */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*(addr)) : "memory")

/*
 * atomic_set_mask - atomically set the bits in @mask in *@addr.
 * The mask is cast to unsigned to match the 32-bit "orl" operand width.
 */
#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (((unsigned)(mask))),"m" (*(addr)) : "memory")
|
|
|
|
/* Atomic operations are already serializing on x86, so these barriers
 * only need to stop the compiler from reordering (barrier()); no extra
 * fence instruction is required around lock-prefixed ops. */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
|
|
|
|
#include <asm-generic/atomic.h>
|
|
#endif
|