Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Commit 2856f5e31c
atomic.h: atomic_add_unless as inline; remove the system.h/atomic.h circular dependency

I agree (with Andi Kleen) that this typeof is not needed and more error prone. All the original atomic.h code that uses cmpxchg (which includes atomic_add_unless) uses defines instead of inline functions, probably to circumvent a circular dependency between system.h and atomic.h on powerpc (which my patch addresses). It therefore makes sense to use inline functions, which provide type checking. Digging into the FRV architecture shows that it is also affected by such a circular dependency.

This diff applies against the rest of my atomic.h patches, over the atomic.h standardization patches.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
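A minimal sketch of the change the message describes, assuming the common cmpxchg-loop implementation; atomic_add_unless_macro is a hypothetical name for the old define-based form, not code from any particular architecture:

/* Define-based form: arguments are substituted textually, so a mistyped
 * 'v' is caught only indirectly, if at all. */
#define atomic_add_unless_macro(v, a, u) \
({ \
        int c, old; \
        c = atomic_read(v); \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old; \
        c != (u); \
})

/* Inline form: the prototype forces 'v' to be an atomic_t *, so the
 * compiler type-checks every call site. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
                c = old;
        return c != u;
}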
259 lines · 5.1 KiB · C
#ifndef _ASM_GENERIC_ATOMIC_H
#define _ASM_GENERIC_ATOMIC_H
/*
 * Copyright (C) 2005 Silicon Graphics, Inc.
 *      Christoph Lameter <clameter@sgi.com>
 *
 * Allows arch-independent atomic definitions to be provided without the
 * need to edit every arch-specific atomic.h file.
 */

#include <asm/types.h>
/*
 * Support for atomic_long_t
 *
 * Casts for parameters are avoided for existing atomic functions in order to
 * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
 * macros of a platform may have.
 */
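/*
 * Each helper below therefore takes an atomic_long_t * and performs the
 * cast to the underlying atomic type on a local variable inside the
 * function body, rather than casting at the call site.
 */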

#if BITS_PER_LONG == 64

typedef atomic64_t atomic_long_t;

#define ATOMIC_LONG_INIT(i)     ATOMIC64_INIT(i)

static inline long atomic_long_read(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return (long)atomic64_read(v);
}

static inline void atomic_long_set(atomic_long_t *l, long i)
{
        atomic64_t *v = (atomic64_t *)l;

        atomic64_set(v, i);
}

static inline void atomic_long_inc(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        atomic64_inc(v);
}

static inline void atomic_long_dec(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        atomic64_dec(v);
}

static inline void atomic_long_add(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        atomic64_add(i, v);
}

static inline void atomic_long_sub(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        atomic64_sub(i, v);
}

static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return atomic64_sub_and_test(i, v);
}

static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return atomic64_dec_and_test(v);
}

static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return atomic64_inc_and_test(v);
}

static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return atomic64_add_negative(i, v);
}

static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return (long)atomic64_add_return(i, v);
}

static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return (long)atomic64_sub_return(i, v);
}

static inline long atomic_long_inc_return(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return (long)atomic64_inc_return(v);
}

static inline long atomic_long_dec_return(atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;

        return (long)atomic64_dec_return(v);
}

static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
        atomic64_t *v = (atomic64_t *)l;

        return (long)atomic64_add_unless(v, a, u);
}

#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))

#define atomic_long_cmpxchg(l, old, new) \
        (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
        (atomic64_xchg((atomic64_t *)(v), (new)))

#else  /*  BITS_PER_LONG == 64  */

typedef atomic_t atomic_long_t;

#define ATOMIC_LONG_INIT(i)     ATOMIC_INIT(i)

static inline long atomic_long_read(atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return (long)atomic_read(v);
}

static inline void atomic_long_set(atomic_long_t *l, long i)
{
        atomic_t *v = (atomic_t *)l;

        atomic_set(v, i);
}

static inline void atomic_long_inc(atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        atomic_inc(v);
}

static inline void atomic_long_dec(atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        atomic_dec(v);
}

static inline void atomic_long_add(long i, atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        atomic_add(i, v);
}

static inline void atomic_long_sub(long i, atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        atomic_sub(i, v);
}

static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return atomic_sub_and_test(i, v);
}

static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return atomic_dec_and_test(v);
}

static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return atomic_inc_and_test(v);
}

static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return atomic_add_negative(i, v);
}

static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return (long)atomic_add_return(i, v);
}

static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return (long)atomic_sub_return(i, v);
}

static inline long atomic_long_inc_return(atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return (long)atomic_inc_return(v);
}

static inline long atomic_long_dec_return(atomic_long_t *l)
{
        atomic_t *v = (atomic_t *)l;

        return (long)atomic_dec_return(v);
}

static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
        atomic_t *v = (atomic_t *)l;

        return (long)atomic_add_unless(v, a, u);
}

#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))

#define atomic_long_cmpxchg(l, old, new) \
        (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
        (atomic_xchg((atomic_t *)(v), (new)))

#endif  /*  BITS_PER_LONG == 64  */

#endif  /*  _ASM_GENERIC_ATOMIC_H  */
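For context, a hedged usage sketch of the API this header provides (nr_objects and the helper functions are hypothetical; the include paths assume this era's kernel layout, where each arch's asm/atomic.h pulls in asm-generic/atomic.h): the point is that a long-sized counter compiles unchanged on 32-bit and 64-bit platforms.

#include <linux/kernel.h>       /* printk() */
#include <asm/atomic.h>         /* atomic_long_t via asm-generic/atomic.h */

/* Hypothetical counter: maps to atomic_t on 32-bit and atomic64_t on
 * 64-bit, with no change to the code below. */
static atomic_long_t nr_objects = ATOMIC_LONG_INIT(0);

static void object_created(void)
{
        atomic_long_inc(&nr_objects);
}

static void object_destroyed(void)
{
        /* atomic_long_dec_and_test() returns nonzero when the count hits 0 */
        if (atomic_long_dec_and_test(&nr_objects))
                printk("last object released\n");
}

static long objects_in_use(void)
{
        return atomic_long_read(&nr_objects);
}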