sh: Fix sh4a llsc-based cmpxchg()
This fixes up a typo in the ll/sc based cmpxchg code which apparently wasn't getting a lot of testing due to the swapped old/new pair. With that fixed up, the ll/sc code also starts using it and provides its own atomic_add_unless().

Signed-off-by: Aoi Shinkai <shinkoi2005@gmail.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent f168dd00a9
commit 4c7c997886
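
The cmpxchg() contract this commit repairs: the word at the target address is replaced with the new value only when it currently holds the expected old value, and the previously observed value is returned either way. Below is a minimal userspace sketch of that contract and of the atomic_add_unless() retry loop this commit adds, using GCC's __sync_val_compare_and_swap builtin as a stand-in for the kernel primitive; it is an illustration, not the kernel code.

#include <stdio.h>

/*
 * Illustration only: store 'new' at *p iff *p currently equals 'old',
 * returning the value that was found either way.  The GCC builtin
 * stands in for the kernel's cmpxchg().
 */
static int cmpxchg_demo(int *p, int old, int new)
{
	return __sync_val_compare_and_swap(p, old, new);
}

/* The atomic_add_unless() retry loop from the patch, in plain C. */
static int add_unless_demo(int *p, int a, int u)
{
	int c = *p;

	for (;;) {
		int old;

		if (c == u)
			break;
		old = cmpxchg_demo(p, c, c + a);
		if (old == c)	/* nobody raced us; the add landed */
			break;
		c = old;	/* raced; retry against the fresh value */
	}
	return c != u;
}

int main(void)
{
	int v = 1;
	int r = add_unless_demo(&v, 1, 0);

	printf("r=%d v=%d\n", r, v);	/* r=1 v=2: added, v was not u */

	v = 0;
	r = add_unless_demo(&v, 1, 0);
	printf("r=%d v=%d\n", r, v);	/* r=0 v=0: v was u, nothing added */
	return 0;
}

A cmpxchg() that wrote the old value back on a successful compare, which is the typo fixed in cmpxchg-llsc.h below, would make this loop report success without ever performing the addition.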
arch/sh/include/asm/atomic-llsc.h
@@ -104,4 +104,31 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 	: "t");
 }
 
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return c != (u);
+}
+
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
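
With atomic_cmpxchg() and atomic_add_unless() now defined here, SH-4A builds take the ll/sc versions directly; the classic consumer is atomic_inc_not_zero(), defined in terms of atomic_add_unless() in the atomic.h hunk below.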
arch/sh/include/asm/atomic.h
@@ -45,7 +45,7 @@
 #define atomic_inc(v)			atomic_add(1,(v))
 #define atomic_dec(v)			atomic_sub(1,(v))
 
-#ifndef CONFIG_GUSA_RB
+#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -73,7 +73,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 	return ret != u;
 }
-#endif
+#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
 
 #define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
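
The widened guard keeps these generic C fallbacks of atomic_cmpxchg() and atomic_add_unless() out of SH-4A builds, where they would otherwise collide with the ll/sc versions that atomic-llsc.h now provides.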
arch/sh/include/asm/cmpxchg-llsc.h
@@ -55,7 +55,7 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
 		"mov		%0, %1				\n\t"
 		"cmp/eq		%1, %3				\n\t"
 		"bf		2f				\n\t"
-		"mov		%3, %0				\n\t"
+		"mov		%4, %0				\n\t"
 		"2:						\n\t"
 		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
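
The operand binding is readable from the hunk itself: %0 is the scratch value that movco.l stores, %1 the value returned to the caller, %2 the address m, %3 old, and %4 new. On a successful compare the old code therefore stored the expected value back (a no-op) instead of the new one. Below is a C rendering of the instruction sequence; load_linked() and store_conditional() are hypothetical stand-ins for movli.l/movco.l, since the reservation they implement cannot be expressed in C, and nothing here is actually atomic.

/* Hypothetical stand-ins so this sketch compiles; this
 * store_conditional always "succeeds". */
static unsigned long load_linked(volatile int *m)
{
	return (unsigned long)*m;
}

static int store_conditional(volatile int *m, unsigned long v)
{
	*m = (int)v;
	return 1;
}

/* What the asm sequence does, instruction by instruction. */
static unsigned long
cmpxchg_u32_in_c(volatile int *m, unsigned long old, unsigned long new)
{
	unsigned long tmp, ret;

	do {
		tmp = load_linked(m);	/* movli.l @%2, %0: load and reserve */
		ret = tmp;		/* mov %0, %1: remember what was seen */
		if (ret == old)		/* cmp/eq %1, %3; bf 2f */
			tmp = new;	/* mov %4, %0: the fixed line */
		/* on mismatch the unchanged value is stored back, which
		 * releases the reservation without altering *m */
	} while (!store_conditional(m, tmp));	/* movco.l %0, @%2; bf 1b */

	return ret;	/* caller compares this against 'old' */
}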
arch/sh/include/asm/spinlock.h
@@ -26,7 +26,7 @@
 #define __raw_spin_is_locked(x)		((x)->lock <= 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(x) \
-	do { cpu_relax(); } while ((x)->lock)
+	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
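
Note the corrected polarity: __raw_spin_is_locked() two lines above defines a held lock as ->lock <= 0, so a free lock is a positive value, and the old while ((x)->lock) test spun on exactly the wrong state. A minimal sketch of the intended semantics under that convention, with demo_* as hypothetical names:

struct demo_lock { volatile int lock; };	/* hypothetical stand-in type */

#define demo_is_locked(x)	((x)->lock <= 0)	/* sh convention: held when <= 0 */

/* Return immediately if the lock is free; otherwise spin until the
 * current holder releases it.  cpu_relax() in the kernel is a
 * busy-wait pipeline hint, not a sleep. */
static void demo_unlock_wait(struct demo_lock *x)
{
	while (demo_is_locked(x))
		;
}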