Merge tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

"Notable changes:

 - A fix for a bug in our page table fragment allocator, where a page
   table page could be freed and reallocated for something else while
   still in use, leading to memory corruption etc. The fix reuses pt_mm
   in struct page (x86 only) for a powerpc-only refcount.

 - Fixes to our pkey support. Several are user-visible changes, but
   they bring us into line with x86 behaviour and/or fix outright bugs.
   Thanks to Florian Weimer for reporting many of these.

 - A series to improve the hvc driver & related OPAL console code,
   which have been seen to cause hard lockups at times. The hvc driver
   changes in particular have been in linux-next for roughly a month.

 - Increase our MAX_PHYSMEM_BITS to 128TB when SPARSEMEM_VMEMMAP=y.

 - Remove Power8 DD1 and Power9 DD1 support; neither chip should be in
   use anywhere other than as a paperweight.

 - An optimised memcmp implementation using Power7-or-later VMX
   instructions.

 - Support for barrier_nospec on some NXP CPUs.

 - Support for flushing the count cache on context switch on some IBM
   CPUs (controlled by firmware), as a Spectre v2 mitigation.

 - A series to enhance the information we print on unhandled signals to
   bring it into line with other arches, including showing the
   offending VMA and dumping the instructions around the fault.

Thanks to: Aaro Koskinen, Akshay Adiga, Alastair D'Silva, Alexey
Kardashevskiy, Alexey Spirkov, Alistair Popple, Andrew Donnellan,
Aneesh Kumar K.V, Anju T Sudhakar, Arnd Bergmann, Bartosz Golaszewski,
Benjamin Herrenschmidt, Bharat Bhushan, Bjoern Noetel, Boqun Feng,
Breno Leitao, Bryant G. Ly, Camelia Groza, Christophe Leroy, Christoph
Hellwig, Cyril Bur, Dan Carpenter, Daniel Klamt, Darren Stevens, Dave
Young, David Gibson, Diana Craciun, Finn Thain, Florian Weimer,
Frederic Barrat, Gautham R. Shenoy, Geert Uytterhoeven, Geoff Levand,
Guenter Roeck, Gustavo Romero, Haren Myneni, Hari Bathini, Joel
Stanley, Jonathan Neuschäfer, Kees Cook, Madhavan Srinivasan, Mahesh
Salgaonkar, Markus Elfring, Mathieu Malaterre, Mauro S. M. Rodrigues,
Michael Hanselmann, Michael Neuling, Michael Schmitz, Mukesh Ojha,
Murilo Opsfelder Araujo, Nicholas Piggin, Parth Y Shah, Paul Mackerras,
Paul Menzel, Ram Pai, Randy Dunlap, Rashmica Gupta, Reza Arbab, Rodrigo
R. Galvao, Russell Currey, Sam Bobroff, Scott Wood, Shilpasri G Bhat,
Simon Guo, Souptick Joarder, Stan Johnson, Thiago Jung Bauermann, Tyrel
Datwyler, Vaibhav Jain, Vasant Hegde, Venkat Rao, zhong jiang"

* tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (234 commits)
  powerpc/mm/book3s/radix: Add mapping statistics
  powerpc/uaccess: Enable get_user(u64, *p) on 32-bit
  powerpc/mm/hash: Remove unnecessary do { } while(0) loop
  powerpc/64s: move machine check SLB flushing to mm/slb.c
  powerpc/powernv/idle: Fix build error
  powerpc/mm/tlbflush: update the mmu_gather page size while iterating address range
  powerpc/mm: remove warning about ‘type’ being set
  powerpc/32: Include setup.h header file to fix warnings
  powerpc: Move `path` variable inside DEBUG_PROM
  powerpc/powermac: Make some functions static
  powerpc/powermac: Remove variable x that's never read
  cxl: remove a dead branch
  powerpc/powermac: Add missing include of header pmac.h
  powerpc/kexec: Use common error handling code in setup_new_fdt()
  powerpc/xmon: Add address lookup for percpu symbols
  powerpc/mm: remove huge_pte_offset_and_shift() prototype
  powerpc/lib: Use patch_site to patch copy_32 functions once cache is enabled
  powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
  powerpc/fadump: merge adjacent memory ranges to reduce PT_LOAD segements
  powerpc/fadump: handle crash memory ranges array index overflow
  ...
562 lines, 13 KiB, C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-405.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with a
 * "bne-" instruction at the end, an isync is enough as an acquire barrier
 * on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

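/*
 * The ATOMIC_OP* macros below generate the basic atomic operations as
 * lwarx/stwcx. (load-and-reserve / store-conditional) retry loops: the
 * "bne- 1b" re-runs the sequence whenever the reservation was lost to
 * another writer.  PPC405_ERR77() (from asm/asm-405.h) emits an erratum
 * workaround before the stwcx. on affected PPC405 parts and expands to
 * nothing elsewhere.  The _relaxed variants provide no ordering; fully
 * ordered versions are typically built from them using the
 * __atomic_*_fence() helpers above.
 */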
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

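/*
 * atomic_inc()/atomic_dec() (and their 64-bit counterparts below) are
 * open-coded rather than generated from ATOMIC_OP() so they can use
 * addic with an immediate; addic updates the carry bit, which is why
 * these variants also clobber "xer".
 */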
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)\
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

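/*
 * cmpxchg()/xchg() and their ordering variants come from asm/cmpxchg.h
 * (included above); the atomic_* wrappers below simply apply them to the
 * counter field.
 */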
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

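/*
 * Unlike the _relaxed operations above, the conditional operations below
 * are fully ordered: they bracket the lwarx/stwcx. loop with
 * PPC_ATOMIC_ENTRY_BARRIER and PPC_ATOMIC_EXIT_BARRIER.
 */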
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless

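/*
 * Illustrative use (not from this file): atomic_inc_not_zero() is the
 * usual way to take a reference only while an object is still live, e.g.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */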
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive

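/*
 * On 64-bit PowerPC the same set of operations is provided for
 * atomic64_t, using the doubleword ldarx/stdcx. pair instead of
 * lwarx/stwcx.; the PPC405 erratum workaround is not needed here since
 * the 405 is a 32-bit core.
 */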
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */