linux_dsm_epyc7002/arch/alpha/include/asm/processor.h
Christian Borntraeger 79ab11cdb9 locking/core: Introduce cpu_relax_yield()
In spinning loops people often use barrier() or cpu_relax().
On most architectures cpu_relax() and barrier() are the same, but on
some architectures cpu_relax() can add latency.
For example, on power, sparc64 and arc, cpu_relax() can shift the CPU
towards other hardware threads in an SMT environment.
On s390, cpu_relax() does even more: it uses a hypercall to the
hypervisor to give up the timeslice.
In contrast to SMT yielding, this can result in larger latencies.
In some places this latency is unwanted, so another variant,
"cpu_relax_lowlatency", was introduced. Before it spreads to more
and more places, let's invert the logic and provide a cpu_relax_yield()
that can be called in places where yielding is more important than
latency. By default this is the same as cpu_relax() on all architectures.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1477386195-32736-2-git-send-email-borntraeger@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-11-16 10:15:09 +01:00
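
Illustrative sketch (editorial, not part of the commit): the kind of spin
loop cpu_relax_yield() is meant for, where giving the timeslice back to the
hypervisor matters more than wakeup latency. The helper name and the counter
are hypothetical.

#include <linux/atomic.h>

/* Wait until all CPUs have checked in.  On s390 every iteration may
 * yield the timeslice via hypercall; on all other architectures
 * cpu_relax_yield() is simply cpu_relax(). */
static void wait_for_cpus(atomic_t *ready, int ncpus)
{
	while (atomic_read(ready) < ncpus)
		cpu_relax_yield();
}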

/*
 * include/asm-alpha/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H

#include <linux/personality.h>	/* for ADDR_LIMIT_32BIT */

/*
 * Returns current instruction pointer ("program counter").
 */
#define current_text_addr() \
	({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })
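/* Editorial note: "br %0,.+4" branches to the immediately following
 * instruction and, as a side effect, writes the updated PC (the address
 * of that next instruction) into __pc; Alpha's br deposits its return
 * address in the named register. */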
/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

#define STACK_TOP \
	(current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)

#define STACK_TOP_MAX	0x00120000000UL
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
	((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)
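/* Editorial note: with TASK_SIZE = 2^42 bytes, 64-bit tasks start their
 * mmap search at TASK_SIZE / 2 = 2TB; ADDR_LIMIT_32BIT tasks start at
 * 0x40000000 = 1GB. */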
typedef struct {
	unsigned long seg;
} mm_segment_t;
/* This is dead. Everything has been moved to thread_info. */
struct thread_struct { };
#define INIT_THREAD { }
/* Return saved PC of a blocked thread. */
struct task_struct;
extern unsigned long thread_saved_pc(struct task_struct *);
/* Do necessary setup to start up a newly executed thread. */
struct pt_regs;
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) \
	((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
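/* Editorial note: Alpha has neither an SMT yield hint nor a
 * directed-yield hypercall, so all three relax variants below take the
 * default: a plain compiler barrier (see the commit message above). */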
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
#define cpu_relax_lowlatency() cpu_relax()
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifndef CONFIG_SMP
/* Nothing to prefetch. */
#define spin_lock_prefetch(lock) do { } while (0)
#endif
extern inline void prefetch(const void *ptr)
{
	/* Second argument 0 = prefetch for read; third argument 3 = high
	 * temporal locality (keep the line in all cache levels). */
	__builtin_prefetch(ptr, 0, 3);
}
extern inline void prefetchw(const void *ptr)
{
	/* Second argument 1 = prefetch for write. */
	__builtin_prefetch(ptr, 1, 3);
}
#ifdef CONFIG_SMP
extern inline void spin_lock_prefetch(const void *ptr)
{
	/* A lock is about to be written, so prefetch for write. */
	__builtin_prefetch(ptr, 1, 3);
}
#endif
#endif /* __ASM_ALPHA_PROCESSOR_H */
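
Illustrative usage (editorial, not part of the header): overlapping the
fetch of the next list node with work on the current one. struct node and
do_work() are hypothetical.

struct node {
	struct node *next;
	int payload;
};

extern void do_work(struct node *n);	/* hypothetical per-node work */

static void walk(struct node *n)
{
	while (n) {
		if (n->next)
			prefetch(n->next);	/* pull the next node into cache early */
		do_work(n);
		n = n->next;
	}
}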