/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
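
/*
 * Illustrative sketch, not part of the original header: atomic_set() is a
 * plain store and atomic_read() a plain volatile load, so neither implies
 * any memory barrier. The helper names and the "stat" counter below are
 * hypothetical.
 */
static inline void example_reset_stat(atomic_t *stat)
{
	/* Plain store; callers needing ordering must add barriers themselves. */
	atomic_set(stat, 0);
}

static inline int example_peek_stat(atomic_t *stat)
{
	/* Snapshot of the current value; it may be stale by the time it is used. */
	return atomic_read(stat);
}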

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}
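
/*
 * Illustrative sketch, not part of the original header: unlike atomic_add(),
 * atomic_add_return() is fully ordered (stlxr release plus the trailing
 * smp_mb()), so its return value can be used to hand out unique, ordered
 * sequence numbers. The helper name and "next_ticket" counter are hypothetical.
 */
static inline int example_take_ticket(atomic_t *next_ticket)
{
	/* Returns the post-increment value; each caller sees a distinct ticket. */
	return atomic_add_return(1, next_ticket);
}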

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
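
/*
 * Illustrative sketch, not part of the original header: the usual
 * compare-and-swap retry loop built on atomic_cmpxchg(), here recording a
 * running maximum. The helper name and "max_seen" counter are hypothetical.
 */
static inline void example_track_max(atomic_t *max_seen, int sample)
{
	int cur = atomic_read(max_seen);

	/* Retry until we either install 'sample' or observe a larger value. */
	while (sample > cur) {
		int old = atomic_cmpxchg(max_seen, cur, sample);
		if (old == cur)
			break;		/* our value was stored */
		cur = old;		/* lost the race; re-check against the winner */
	}
}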

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
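
/*
 * Illustrative sketch, not part of the original header: __atomic_add_unless()
 * is the building block that the generic atomic_add_unless() and
 * atomic_inc_not_zero() helpers in <linux/atomic.h> are expected to wrap; a
 * typical use is taking a reference only while the object is still live.
 * The helper name below is hypothetical.
 */
static inline bool example_get_ref(atomic_t *refcount)
{
	/* Increment unless the count is already zero (object being torn down). */
	return __atomic_add_unless(refcount, 1, 0) != 0;
}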

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
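
/*
 * Illustrative sketch, not part of the original header: the classic
 * reference-count release pattern built from atomic_dec_and_test(). The
 * helper name and the release callback are hypothetical.
 */
static inline void example_put_ref(atomic_t *refcount, void (*release)(void))
{
	/* Only the caller that drops the final reference sees 'true' here. */
	if (atomic_dec_and_test(refcount))
		release();
}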

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
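
/*
 * Illustrative sketch, not part of the original header:
 * atomic64_dec_if_positive() only decrements while the result stays
 * non-negative, which suits a counting-semaphore style "try to take one
 * slot" helper. The helper name and "free_slots" counter are hypothetical.
 */
static inline bool example_try_take_slot(atomic64_t *free_slots)
{
	/* Negative return means no slot was free and the count was left alone. */
	return atomic64_dec_if_positive(free_slots) >= 0;
}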

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif
#endif