Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-12 03:06:46 +07:00)
bd9a4c7df2
Add a platform-independent hwspinlock framework.

Hardware spinlock devices are needed, e.g., in order to access data that is
shared between remote processors that otherwise have no alternative mechanism
to accomplish synchronization and mutual exclusion operations.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Cc: Hari Kanigeri <h-kanigeri2@ti.com>
Cc: Benoit Cousson <b-cousson@ti.com>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Grant Likely <grant.likely@secretlab.ca>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Tony Lindgren <tony@atomide.com>
293 lines
9.9 KiB
C
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE        0x01    /* Disable interrupts, save state */
#define HWLOCK_IRQ             0x02    /* Disable interrupts, don't save state */

struct hwspinlock;

#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)

int hwspin_lock_register(struct hwspinlock *lock);
struct hwspinlock *hwspin_lock_unregister(unsigned int id);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
                                                        unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, code that uses these stubs will still work.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
 * we _do_ want users to fail (there is no point in registering hwspinlock
 * instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, who care, can still check this with IS_ERR.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
        return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
        return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
{
        return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        /* void function: the stub intentionally does nothing */
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        return 0;
}

static inline int hwspin_lock_register(struct hwspinlock *hwlock)
{
        return -ENODEV;
}

static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
{
        return NULL;
}

#endif /* !CONFIG_HWSPINLOCK */

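/*
 * Illustrative sketch (not part of the original header), showing the
 * implementer side of the API. In the original TI OMAP driver, platform
 * code allocates one lock instance per hardware lock and hands it to the
 * framework; error handling here is deliberately minimal:
 *
 *      struct hwspinlock *hwlock;
 *      int ret;
 *
 *      hwlock = kzalloc(sizeof(*hwlock), GFP_KERNEL);
 *      if (!hwlock)
 *              return -ENOMEM;
 *
 *      // ... fill in the driver-specific fields of *hwlock ...
 *
 *      ret = hwspin_lock_register(hwlock);
 *      if (ret)
 *              kfree(hwlock);
 *
 * When the framework is not built in, hwspin_lock_register() resolves to
 * the stub above and fails with -ENODEV, so a registering driver fails
 * loudly rather than pretending a lock exists.
 */
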
/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}

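/*
 * Illustrative usage sketch (not part of the original header), assuming
 * 'hwlock' was obtained earlier, e.g. via hwspin_lock_request():
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_trylock_irqsave(hwlock, &flags);
 *      if (ret)
 *              return ret;     // -EBUSY: another core holds the lock
 *
 *      // short critical section: touch the shared data and get out
 *
 *      hwspin_unlock_irqrestore(hwlock, &flags);
 */
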
/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, 0, NULL);
}

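/*
 * Illustrative sketch (not part of the original header): a caller that
 * never takes the lock from interrupt context, and that holds a known
 * valid 'hwlock', could poll the plain variant while backing off between
 * attempts:
 *
 *      while (hwspin_trylock(hwlock))
 *              cpu_relax();
 *
 *      // critical section
 *
 *      hwspin_unlock(hwlock);
 *
 * In most cases the hwspin_lock_timeout() family below is preferable,
 * since it bounds the busy-wait instead of spinning indefinitely.
 */
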
/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus the previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
                                unsigned int to, unsigned long *flags)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

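/*
 * Illustrative usage sketch (not part of the original header): bound the
 * busy-wait while keeping local interrupts disabled across the critical
 * section; the 100 msec budget is an arbitrary example:
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
 *      if (ret)
 *              return ret;     // e.g. -ETIMEDOUT: remote core held it too long
 *
 *      // critical section
 *
 *      hwspin_unlock_irqrestore(hwlock, &flags);
 */
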
/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
                                                unsigned long *flags)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. It should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable
 * preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, 0, NULL);
}
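
/*
 * End-to-end sketch (illustrative, not part of the original header):
 * dynamically request an unused lock, take it with a bounded busy-wait,
 * and return it to the framework when done. The 10 msec budget is an
 * arbitrary example. Per the stub comment above, NULL-checking callers
 * must beware that the stubs return ERR_PTR(-ENODEV), so both checks are
 * used here:
 *
 *      struct hwspinlock *hwlock;
 *      int ret;
 *
 *      hwlock = hwspin_lock_request();
 *      if (!hwlock || IS_ERR(hwlock))
 *              return -EBUSY;  // no free lock, or framework not built in
 *
 *      // hwspin_lock_get_id() yields the numeric id a remote processor
 *      // needs in order to take the very same lock on its side
 *
 *      ret = hwspin_lock_timeout(hwlock, 10);
 *      if (!ret) {
 *              // critical section
 *              hwspin_unlock(hwlock);
 *      }
 *
 *      hwspin_lock_free(hwlock);
 */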

#endif /* __LINUX_HWSPINLOCK_H */