mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
7af8a0f808
- Support for execute-only page permissions - Support for hibernate and DEBUG_PAGEALLOC - Support for heterogeneous systems with mismatches cache line sizes - Errata workarounds (A53 843419 update and QorIQ A-008585 timer bug) - arm64 PMU perf updates, including cpumasks for heterogeneous systems - Set UTS_MACHINE for building rpm packages - Yet another head.S tidy-up - Some cleanups and refactoring, particularly in the NUMA code - Lots of random, non-critical fixes across the board -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABCgAGBQJX7k31AAoJELescNyEwWM0XX0H/iOaWCfKlWOhvBsStGUCsLrK XryTzQT2KjdnLKf3jwP+1ateCuBR5ROurYxoDCX5/7mD63c5KiI338Vbv61a1lE1 AAwjt1stmQVUg/j+kqnuQwB/0DYg+2C8se3D3q5Iyn7zc19cDZJEGcBHNrvLMufc XgHrgHgl/rzBDDlHJXleknDFge/MfhU5/Q1vJMRRb4JYrpAtmIokzCO75CYMRcCT ND2QbmppKtsyuFPGUTVbAFzJlP6dGKb3eruYta7/ct5d0pJQxav3u98D2yWGfjdM YaYq1EmX5Pol7rWumqLtk0+mA9yCFcKLLc+PrJu20Vx0UkvOq8G8Xt70sHNvZU8= =gdPM -----END PGP SIGNATURE----- Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull arm64 updates from Will Deacon: "It's a bit all over the place this time with no "killer feature" to speak of. Support for mismatched cache line sizes should help people seeing whacky JIT failures on some SoCs, and the big.LITTLE perf updates have been a long time coming, but a lot of the changes here are cleanups. We stray outside arch/arm64 in a few areas: the arch/arm/ arch_timer workaround is acked by Russell, the DT/OF bits are acked by Rob, the arch_timer clocksource changes acked by Marc, CPU hotplug by tglx and jump_label by Peter (all CC'd). 
Summary: - Support for execute-only page permissions - Support for hibernate and DEBUG_PAGEALLOC - Support for heterogeneous systems with mismatches cache line sizes - Errata workarounds (A53 843419 update and QorIQ A-008585 timer bug) - arm64 PMU perf updates, including cpumasks for heterogeneous systems - Set UTS_MACHINE for building rpm packages - Yet another head.S tidy-up - Some cleanups and refactoring, particularly in the NUMA code - Lots of random, non-critical fixes across the board" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (100 commits) arm64: tlbflush.h: add __tlbi() macro arm64: Kconfig: remove SMP dependence for NUMA arm64: Kconfig: select OF/ACPI_NUMA under NUMA config arm64: fix dump_backtrace/unwind_frame with NULL tsk arm/arm64: arch_timer: Use archdata to indicate vdso suitability arm64: arch_timer: Work around QorIQ Erratum A-008585 arm64: arch_timer: Add device tree binding for A-008585 erratum arm64: Correctly bounds check virt_addr_valid arm64: migrate exception table users off module.h and onto extable.h arm64: pmu: Hoist pmu platform device name arm64: pmu: Probe default hw/cache counters arm64: pmu: add fallback probe table MAINTAINERS: Update ARM PMU PROFILING AND DEBUGGING entry arm64: Improve kprobes test for atomic sequence arm64/kvm: use alternative auto-nop arm64: use alternative auto-nop arm64: alternative: add auto-nop infrastructure arm64: lse: convert lse alternatives NOP padding to use __nops arm64: barriers: introduce nops and __nops macros for NOP sequences arm64: sysreg: replace open-coded mrs_s/msr_s with {read,write}_sysreg_s ...
372 lines
8.5 KiB
C
372 lines
8.5 KiB
C
/*
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
*/
|
|
#ifndef __ASM_SPINLOCK_H
|
|
#define __ASM_SPINLOCK_H
|
|
|
|
#include <asm/lse.h>
|
|
#include <asm/spinlock_types.h>
|
|
#include <asm/processor.h>
|
|
|
|
/*
|
|
* Spinlock implementation.
|
|
*
|
|
* The memory barriers are implicit with the load-acquire and store-release
|
|
* instructions.
|
|
*/
|
|
/*
 * Wait until "lock" is no longer held by its current owner, without
 * trying to acquire it. Spins on WFE until either the lock is observed
 * free or an unlock->lock handover has happened since we sampled the
 * owner field; in the free case, the unlocked value is written back to
 * serialise against concurrent lockers.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;
	u32 owner;

	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb();
	owner = READ_ONCE(lock->owner) << 16;

	asm volatile(
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
	/* Is the lock free? (owner == next when the halves match) */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/* Lock taken -- has there been a subsequent unlock->lock transition? */
"	eor	%w1, %w3, %w0, lsl #16\n"
"	cbz	%w1, 1b\n"
	/*
	 * The owner has been updated, so there was an unlock->lock
	 * transition that we missed. That means we can rely on the
	 * store-release of the unlock operation paired with the
	 * load-acquire of the lock operation to publish any of our
	 * previous stores to the new lock owner and therefore don't
	 * need to bother with the writeback below.
	 */
"	b	4f\n"
"3:\n"
	/*
	 * Serialise against any concurrent lockers by writing back the
	 * unlocked lock value
	 */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	stxr	%w1, %w0, %2\n"
	__nops(2),
	/* LSE atomics */
"	mov	%w1, %w0\n"
"	cas	%w0, %w0, %2\n"
"	eor	%w1, %w1, %w0\n")
	/* Somebody else wrote to the lock; go back to 2 and reload the value */
"	cbnz	%w1, 2b\n"
"4:"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "r" (owner)
	: "memory");
}
|
|
|
|
/* IRQ flags have no effect on acquisition here; fall back to plain lock. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
|
|
|
|
/*
 * Acquire the ticket lock: atomically grab the next ticket, then spin
 * (on WFE) until the owner half catches up with our ticket. The
 * load-acquire provides the required lock-acquire barrier.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? (our ticket == owner) */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}
|
|
|
|
/*
 * Try to take the ticket lock exactly once, without spinning.
 * Returns non-zero on success, zero if the lock was held.
 *
 * Fix (matches upstream commit 202fb4ef8135, "arm64: spinlock: Fix
 * theoretical trylock() A-B-A with LSE atomics"): the LSE success check
 * previously compared only the low 16 bits of the CASA result against
 * the owner half of the originally loaded value. If the CASA failed but
 * the lock word had cycled back to a matching owner (A-B-A), trylock
 * could wrongly report success. Instead, recover the CASA's expected
 * old value (%w1 - %3) and compare it against the value the CAS
 * actually observed (%w0): they are equal iff the CAS succeeded.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	sub	%w1, %w1, %3\n"
"	eor	%w1, %w1, %w0\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	/* tmp == 0 iff we acquired the lock. */
	return !tmp;
}
|
|
|
|
/*
 * Release the ticket lock by incrementing the owner half. The
 * store-release (stlrh / staddlh) provides the unlock barrier.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	ldrh	%w1, %0\n"
"	add	%w1, %w1, #1\n"
"	stlrh	%w1, %0",
	/* LSE atomics */
"	mov	%w1, #1\n"
"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
|
|
|
|
/*
 * Report whether a (by-value) snapshot of the lock is unlocked, i.e.
 * the next-ticket half equals the owner half.
 */
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.next == lock.owner;
}
|
|
|
|
/*
 * Test whether the lock is currently held. The full barrier orders
 * this read against prior lock operations on this CPU (see
 * arch_spin_unlock_wait).
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;

	smp_mb(); /* See arch_spin_unlock_wait */
	lockval = READ_ONCE(*lock);
	return !arch_spin_value_unlocked(lockval);
}
|
|
|
|
/*
 * A ticket lock is contended when more than one ticket is outstanding,
 * i.e. next has advanced more than one step past owner.
 */
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t snapshot = READ_ONCE(*lock);

	return (snapshot.next - snapshot.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
|
|
|
|
/*
|
|
* Write lock implementation.
|
|
*
|
|
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
|
|
* exclusively held.
|
|
*
|
|
* The memory barriers are implicit with the load-acquire and store-release
|
|
* instructions.
|
|
*/
|
|
|
|
/*
 * Acquire the write lock: wait (via WFE) for the lock word to read as
 * zero, then install 0x80000000 to claim exclusive ownership.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %1\n"
"	cbnz	%w0, 1b\n"
"	stxr	%w0, %w2, %1\n"
"	cbnz	%w0, 2b\n"
	__nops(1),
	/* LSE atomics: CAS 0 -> 0x80000000, re-poll with ldxr + wfe on failure */
"1:	mov	%w0, wzr\n"
"2:	casa	%w0, %w2, %1\n"
"	cbz	%w0, 3f\n"
"	ldxr	%w0, %1\n"
"	cbz	%w0, 2b\n"
"	wfe\n"
"	b	1b\n"
"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}
|
|
|
|
/*
 * Try once to take the write lock (0 -> 0x80000000).
 * Returns non-zero on success, zero if readers or a writer hold it.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	ldaxr	%w0, %1\n"
"	cbnz	%w0, 2f\n"
"	stxr	%w0, %w2, %1\n"
"	cbnz	%w0, 1b\n"
"2:",
	/* LSE atomics: casa leaves the observed old value (0 on success) in %w0 */
"	mov	%w0, wzr\n"
"	casa	%w0, %w2, %1\n"
	__nops(2))
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	/* tmp == 0 iff the lock word was 0 and we claimed it. */
	return !tmp;
}
|
|
|
|
/*
 * Release the write lock by storing 0 with release semantics. Safe to
 * write unconditionally because a write lock is exclusively held.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}
|
|
|
|
/* write_can_lock - would write_trylock() succeed? (no readers, no writer) */
#define arch_write_can_lock(x) ((x)->lock == 0)
|
|
|
|
/*
|
|
* Read lock implementation.
|
|
*
|
|
* It exclusively loads the lock value, increments it and stores the new value
|
|
* back if positive and the CPU still exclusively owns the location. If the
|
|
* value is negative, the lock is already held.
|
|
*
|
|
* During unlocking there may be multiple active read locks but no write lock.
|
|
*
|
|
* The memory barriers are implicit with the load-acquire and store-release
|
|
* instructions.
|
|
*
|
|
* Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
|
|
* and LSE implementations may exhibit different behaviour (although this
|
|
* will have no effect on lockdep).
|
|
*/
|
|
/*
 * Acquire a read lock: increment the reader count unless bit 31 (write
 * lock) would be set, in which case wait (WFE) and retry.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, #1\n"
"	tbnz	%w0, #31, 1b\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 2b\n"
	__nops(1),
	/* LSE atomics */
"1:	wfe\n"
"2:	ldxr	%w0, %2\n"
"	adds	%w1, %w0, #1\n"
"	tbnz	%w1, #31, 1b\n"
"	casa	%w0, %w1, %2\n"
	/*
	 * NOTE(review): sbc appears to yield 0 exactly when the casa
	 * observed the value we loaded (i.e. it succeeded); confirm
	 * against the carry state left by the adds above.
	 */
"	sbc	%w0, %w1, %w0\n"
"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}
|
|
|
|
/*
 * Release a read lock: decrement the reader count with release
 * semantics (stlxr / staddl of -1).
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, #1\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b",
	/* LSE atomics: movn gives -1, which staddl adds atomically */
"	movn	%w0, #0\n"
"	staddl	%w0, %2\n"
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}
|
|
|
|
/*
 * Try once to take a read lock. Returns non-zero on success, zero if a
 * writer holds the lock (bit 31 set) or the update could not complete.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	mov	%w1, #1\n"
"1:	ldaxr	%w0, %2\n"
"	add	%w0, %w0, #1\n"
"	tbnz	%w0, #31, 2f\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	adds	%w1, %w0, #1\n"
"	tbnz	%w1, #31, 1f\n"
"	casa	%w0, %w1, %2\n"
	/*
	 * NOTE(review): as in arch_read_lock, sbc seems to produce 0
	 * exactly when the casa succeeded (relying on the carry from
	 * the adds above) -- confirm before modifying this sequence.
	 */
"	sbc	%w1, %w1, %w0\n"
	__nops(1)
"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	/* tmp2 == 0 iff the read lock was taken. */
	return !tmp2;
}
|
|
|
|
/* read_can_lock - would read_trylock() succeed? (write bit 31 clear) */
#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
|
|
|
|
/* IRQ flags do not influence rwlock acquisition; use the plain variants. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Polite busy-wait hints used by the generic lock-spinning code. */
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
|
|
|
|
/*
 * Accesses appearing in program order before a spin_lock() operation
 * can be reordered with accesses inside the critical section, by virtue
 * of arch_spin_lock being constructed using acquire semantics.
 *
 * In cases where this is problematic (e.g. try_to_wake_up), an
 * smp_mb__before_spinlock() can restore the required ordering.
 */
#define smp_mb__before_spinlock()	smp_mb()
|
|
|
|
#endif /* __ASM_SPINLOCK_H */
|