commit 58d6ea3085
xa_store() differs from radix_tree_insert() in that it will overwrite an existing element in the array rather than returning an error. This is the behaviour which most users want, and those that want more complex behaviour generally want to use the xas family of routines anyway.

For memory allocation, xa_store() will first attempt to request memory from the slab allocator; if memory is not immediately available, it will drop the xa_lock and allocate memory, keeping a pointer in the xa_state. It does not use the per-CPU cache, although those caches will continue to exist until all radix tree users are converted to the XArray.

This patch also includes xa_erase() and __xa_erase() for a streamlined way to store NULL. Since there is no need to allocate memory in order to store NULL in the XArray, we do not need to trouble the user with deciding what memory allocation flags to use.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
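A minimal sketch of these semantics in a kernel-code context; the array my_xa and the function xa_store_demo() are hypothetical names for illustration, while DEFINE_XARRAY(), xa_store(), and xa_erase() are the XArray API described above:

#include <linux/xarray.h>

static DEFINE_XARRAY(my_xa);	/* hypothetical example array */

static void xa_store_demo(void *first, void *second)
{
	void *old;

	/* A second store to the same index overwrites rather than
	 * failing, and the previous entry is returned. */
	old = xa_store(&my_xa, 7, first, GFP_KERNEL);	/* old == NULL */
	old = xa_store(&my_xa, 7, second, GFP_KERNEL);	/* old == first */

	/* Erasing stores NULL; no gfp flags are needed because
	 * nothing has to be allocated. */
	old = xa_erase(&my_xa, 7);			/* old == second */
}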
43 lines
1.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
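/*
 * Pthread-based stand-ins for the kernel spinlock API, presumably so
 * the code under test can be built and run in userspace.
 */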
#ifndef __LINUX_SPINLOCK_H_
#define __LINUX_SPINLOCK_H_

#include <pthread.h>
#include <stdbool.h>

#define spinlock_t pthread_mutex_t
#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER
#define __SPIN_LOCK_UNLOCKED(x) (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER
#define spin_lock_init(x) pthread_mutex_init(x, NULL)

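/*
 * Userspace has no softirqs or interrupts to disable, so the _bh,
 * _irq, and _irqsave variants all reduce to plain mutex operations.
 */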
#define spin_lock(x) pthread_mutex_lock(x)
#define spin_unlock(x) pthread_mutex_unlock(x)
#define spin_lock_bh(x) pthread_mutex_lock(x)
#define spin_unlock_bh(x) pthread_mutex_unlock(x)
#define spin_lock_irq(x) pthread_mutex_lock(x)
#define spin_unlock_irq(x) pthread_mutex_unlock(x)
#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x)
#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x)

#define arch_spinlock_t pthread_mutex_t
#define __ARCH_SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

static inline void arch_spin_lock(arch_spinlock_t *mutex)
{
	pthread_mutex_lock(mutex);
}

static inline void arch_spin_unlock(arch_spinlock_t *mutex)
{
	pthread_mutex_unlock(mutex);
}

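/*
 * Always claim the lock is held; presumably callers only assert that
 * it is, so tracking the real lock state is unnecessary here.
 */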
static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
{
	return true;
}

#include <linux/lockdep.h>

#endif
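As a quick illustration, a userspace snippet along these lines should compile against this shim; the include path and the name demo_lock are hypothetical:

#include "linux/spinlock.h"	/* hypothetical relative include path */

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock name */

int main(void)
{
	unsigned long flags;

	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);

	/* The irqsave pair just evaluates and ignores flags here. */
	spin_lock_irqsave(&demo_lock, flags);
	spin_unlock_irqrestore(&demo_lock, flags);
	return 0;
}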