linux_dsm_epyc7002/arch/x86/include/asm/atomic64_32.h
Mark Rutland 79c53a83d7 locking/atomic, x86: Use s64 for atomic64
As a step towards making the atomic64 API use consistent types treewide,
let's have the x86 atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long or long long, matching the
generated headers.

Note that the x86 arch_atomic64 implementation is already wrapped by the
generic instrumented atomic64 implementation, which uses s64
consistently.

Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aou@eecs.berkeley.edu
Cc: arnd@arndb.de
Cc: catalin.marinas@arm.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: heiko.carstens@de.ibm.com
Cc: herbert@gondor.apana.org.au
Cc: ink@jurassic.park.msu.ru
Cc: jhogan@kernel.org
Cc: mattst88@gmail.com
Cc: mpe@ellerman.id.au
Cc: palmer@sifive.com
Cc: paul.burton@mips.com
Cc: paulus@samba.org
Cc: ralf@linux-mips.org
Cc: rth@twiddle.net
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Link: https://lkml.kernel.org/r/20190522132250.26499-16-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2019-06-03 12:32:57 +02:00
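As a quick illustration of the change (the pre-patch 32-bit x86 header used long long in the accessor signatures), a declaration such as

	static inline long long arch_atomic64_read(const atomic64_t *v);

becomes

	static inline s64 arch_atomic64_read(const atomic64_t *v);

matching the s64-based generated headers. The full post-patch file follows.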


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif
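
/*
 * The out-of-line helpers (atomic64_*_cx8 and atomic64_*_386) take their
 * arguments in registers rather than via the C calling convention, so
 * they are declared with a dummy (atomic64_t *, ...) prototype and are
 * only ever invoked through the inline-asm wrappers below.
 */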
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %P[func]" \
		     : out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
	ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif
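
/*
 * Dispatch: with CONFIG_X86_CMPXCHG64 every supported CPU has CMPXCHG8B,
 * so the _cx8 helper is called directly.  Otherwise alternative_call()
 * patches each call site at boot, selecting the _cx8 helper on CPUs with
 * X86_FEATURE_CX8 and the _386 fallback (which works by briefly
 * disabling interrupts) on older UP-only machines.
 */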
#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

/**
 * arch_atomic64_cmpxchg - cmpxchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @o: expected value
 * @n: new value
 *
 * Atomically sets @v to @n if it was equal to @o and returns
 * the old value.
 */
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	return arch_cmpxchg64(&v->counter, o, n);
}
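
/*
 * arch_cmpxchg64() compiles down to the CMPXCHG8B instruction, or to a
 * call to the cmpxchg8b_emu fallback on CPUs that lack it.
 */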

/**
 * arch_atomic64_xchg - xchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @n: value to assign
 *
 * Atomically xchgs the value of @v to @n and returns
 * the old value.
 */
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
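	/*
	 * The 64-bit new value is handed to the helper split across
	 * ebx (low half) and ecx (high half); the "A" constraint ties
	 * the returned old value to the edx:eax register pair.
	 */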
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: value to assign
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v and returns it.
 */
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + *@v
 */
static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/*
 * Other variants with different arithmetic operators:
 */
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec
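
/*
 * The f/g name pairs passed to __alternative_atomic64() in the four ops
 * above exist because the CX8 helper library only implements the
 * *_return forms: with CMPXCHG8B available, plain add/sub/inc/dec simply
 * call atomic64_*_return_cx8, while 386-class CPUs are patched to the
 * cheaper dedicated atomic64_*_386 helpers that skip the return value.
 */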

/**
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}

static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#undef alternative_atomic64
#undef __alternative_atomic64
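
/*
 * The bitwise ops and fetch_add have no dedicated out-of-line helper;
 * each is built as a compare-and-swap loop: read the current value,
 * compute the new one, and retry with cmpxchg until no other CPU has
 * modified the counter in between.
 */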
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;
}

static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;

	return old;
}

static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}

static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;

	return old;
}

static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;
}

static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;

	return old;
}

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
		c = old;

	return old;
}

#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))
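
/*
 * Illustrative (hypothetical) usage -- callers go through the generic
 * atomic64_*() wrappers, which land on these arch_ functions on 32-bit x86:
 *
 *	static atomic64_t bytes_rx = ATOMIC64_INIT(0);
 *
 *	atomic64_add(pkt_len, &bytes_rx);
 *	if (atomic64_read(&bytes_rx) > quota)
 *		throttle();
 *
 * (bytes_rx, pkt_len, quota and throttle() are made-up names for the
 * example.)
 */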
#endif /* _ASM_X86_ATOMIC64_32_H */