mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-15 15:46:52 +07:00
b62f134391
Use the new generic cmpxchg_local (disables interrupt). Also use the generic cmpxchg as fallback if SMP is not set. Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> Cc: Miles Bader <miles.bader@necel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
124 lines
3.1 KiB
C
/*
 * include/asm-v850/system.h -- Low-level interrupt/thread ops
 *
 * Copyright (C) 2001,02,03 NEC Electronics Corporation
 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */
|
#ifndef __V850_SYSTEM_H__
|
|
#define __V850_SYSTEM_H__
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/ptrace.h>
|
|
|
|
|
|
/*
|
|
* switch_to(n) should switch tasks to task ptr, first checking that
|
|
* ptr isn't the current task, in which case it does nothing.
|
|
*/
|
|
struct thread_struct;
|
|
extern void *switch_thread (struct thread_struct *last,
|
|
struct thread_struct *next);
|
|
#define switch_to(prev,next,last) \
|
|
do { \
|
|
if (prev != next) { \
|
|
(last) = switch_thread (&prev->thread, &next->thread); \
|
|
} \
|
|
} while (0)
|
|
|
|
|
|
/* Enable/disable interrupts. */
|
|
#define local_irq_enable() __asm__ __volatile__ ("ei")
|
|
#define local_irq_disable() __asm__ __volatile__ ("di")
|
|
|
|
#define local_save_flags(flags) \
|
|
__asm__ __volatile__ ("stsr %1, %0" : "=r" (flags) : "i" (SR_PSW))
|
|
#define local_restore_flags(flags) \
|
|
__asm__ __volatile__ ("ldsr %0, %1" :: "r" (flags), "i" (SR_PSW))
|
|
|
|
/* For spinlocks etc */
|
|
#define local_irq_save(flags) \
|
|
do { local_save_flags (flags); local_irq_disable (); } while (0)
|
|
#define local_irq_restore(flags) \
|
|
local_restore_flags (flags);
|
|
|
|
|
|
static inline int irqs_disabled (void)
|
|
{
|
|
unsigned flags;
|
|
local_save_flags (flags);
|
|
return !!(flags & 0x20);
|
|
}
|
|
|
|
|
|
/*
|
|
* Force strict CPU ordering.
|
|
* Not really required on v850...
|
|
*/
|
|
#define nop() __asm__ __volatile__ ("nop")
|
|
#define mb() __asm__ __volatile__ ("" ::: "memory")
|
|
#define rmb() mb ()
|
|
#define wmb() mb ()
|
|
#define read_barrier_depends() ((void)0)
|
|
#define set_mb(var, value) do { xchg (&var, value); } while (0)
|
|
|
|
#define smp_mb() mb ()
|
|
#define smp_rmb() rmb ()
|
|
#define smp_wmb() wmb ()
|
|
#define smp_read_barrier_depends() read_barrier_depends()
|
|
|
|
#define xchg(ptr, with) \
|
|
((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))
|
|
|
|
static inline unsigned long __xchg (unsigned long with,
|
|
__volatile__ void *ptr, int size)
|
|
{
|
|
unsigned long tmp, flags;
|
|
|
|
local_irq_save (flags);
|
|
|
|
switch (size) {
|
|
case 1:
|
|
tmp = *(unsigned char *)ptr;
|
|
*(unsigned char *)ptr = with;
|
|
break;
|
|
case 2:
|
|
tmp = *(unsigned short *)ptr;
|
|
*(unsigned short *)ptr = with;
|
|
break;
|
|
case 4:
|
|
tmp = *(unsigned long *)ptr;
|
|
*(unsigned long *)ptr = with;
|
|
break;
|
|
}
|
|
|
|
local_irq_restore (flags);
|
|
|
|
return tmp;
|
|
}
|
|
|
|
#include <asm-generic/cmpxchg-local.h>
|
|
|
|
/*
|
|
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
|
|
* them available.
|
|
*/
|
|
#define cmpxchg_local(ptr, o, n) \
|
|
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
|
|
(unsigned long)(n), sizeof(*(ptr))))
|
|
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
|
|
|
|
#ifndef CONFIG_SMP
|
|
#include <asm-generic/cmpxchg.h>
|
|
#endif
|
|
|
|
#define arch_align_stack(x) (x)
|
|
|
|
#endif /* __V850_SYSTEM_H__ */
|