XArray: Add support for 1s-based allocation
A lot of places want to allocate IDs starting at 1 instead of 0. While the xa_alloc() API supports this, it's not very efficient if lots of IDs are allocated, due to having to walk down to the bottom of the tree to see if ID 1 is available, then all the way over to the next non-allocated ID. This method marks ID 0 as being occupied, which wastes one slot in the XArray but preserves xa_empty() as working.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Parent: fd9dc93e36
Commit: 3ccaf57a6a
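For orientation, here is a minimal sketch (not part of this commit) of how a caller might use the new 1s-based allocator. The struct, array name and functions below are hypothetical; DEFINE_XARRAY_ALLOC1, xa_alloc() and xa_erase() are the interfaces added or exercised in the diff that follows.

/*
 * Hypothetical user of a 1s-based allocating XArray: IDs start at 1,
 * so an id of 0 can safely mean "none".  Names here are illustrative.
 */
#include <linux/xarray.h>

struct session {
        u32 id;
        /* ... */
};

static DEFINE_XARRAY_ALLOC1(session_ids);       /* index 0 is pre-marked busy */

static int session_register(struct session *s)
{
        u32 id = 0;
        int err;

        /* On success the allocated ID (always >= 1 here) is written to id */
        err = xa_alloc(&session_ids, &id, UINT_MAX, s, GFP_KERNEL);
        if (err)
                return err;
        s->id = id;
        return 0;
}

static void session_unregister(struct session *s)
{
        xa_erase(&session_ids, s->id);
}

Because the implementation marks index 0 as occupied, the array still reports xa_empty() until a real entry is stored, as the commit message notes.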
Documentation/core-api/xarray.rst
@@ -131,17 +131,21 @@ If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or
 initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
 the XArray changes to track whether entries are in use or not.
 
-You can call :c:func:`xa_alloc` to store the entry at any unused index
+You can call :c:func:`xa_alloc` to store the entry at an unused index
 in the XArray. If you need to modify the array from interrupt context,
 you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable
 interrupts while allocating the ID.
 
-Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert`
-will mark the entry as being allocated. Unlike a normal XArray, storing
+Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` will
+also mark the entry as being allocated. Unlike a normal XArray, storing
 ``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`.
 To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if
 you only want to free the entry if it's ``NULL``).
 
+By default, the lowest free entry is allocated starting from 0. If you
+want to allocate entries starting at 1, it is more efficient to use
+:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``.
+
 You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
 is used to track whether an entry is free or not. The other marks are
 available for your use.
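The documentation above covers the file-scope DEFINE_XARRAY_ALLOC1 case. For an XArray embedded in another structure, the run-time equivalent is to pass XA_FLAGS_ALLOC1 to xa_init_flags(); a short sketch with hypothetical names:

#include <linux/xarray.h>

struct foo {
        struct xarray ids;              /* 1s-based ID allocator */
};

static void foo_init(struct foo *foo)
{
        /* Same effect as DEFINE_XARRAY_ALLOC1, but at run time */
        xa_init_flags(&foo->ids, XA_FLAGS_ALLOC1);
}

As the text notes, xa_alloc_bh() or xa_alloc_irq() can be used in place of xa_alloc() if the array is also modified from softirq or hardirq context.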
include/linux/xarray.h
@@ -220,10 +220,13 @@ enum xa_lock_type {
 #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
 #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
 #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
+#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
 #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
                                                 (__force unsigned)(mark)))
 
+/* ALLOC is for a normal 0-based alloc. ALLOC1 is for an 1-based alloc */
 #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
+#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)
 
 /**
  * struct xarray - The anchor of the XArray.
@@ -279,7 +282,7 @@ struct xarray {
 #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
 
 /**
- * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs.
+ * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
  * @name: A string that names your XArray.
  *
  * This is intended for file scope definitions of allocating XArrays.
@@ -287,6 +290,15 @@ struct xarray {
  */
 #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
 
+/**
+ * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
+ * @name: A string that names your XArray.
+ *
+ * This is intended for file scope definitions of allocating XArrays.
+ * See also DEFINE_XARRAY().
+ */
+#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)
+
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *xa_erase(struct xarray *, unsigned long index);
lib/test_xarray.c
@@ -589,64 +589,86 @@ static noinline void check_multi_store(struct xarray *xa)
 #endif
 }
 
-static DEFINE_XARRAY_ALLOC(xa0);
-
-static noinline void check_xa_alloc(void)
+static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
 {
         int i;
         u32 id;
 
-        /* An empty array should assign 0 to the first alloc */
-        xa_alloc_index(&xa0, 0, GFP_KERNEL);
+        XA_BUG_ON(xa, !xa_empty(xa));
+        /* An empty array should assign %base to the first alloc */
+        xa_alloc_index(xa, base, GFP_KERNEL);
 
         /* Erasing it should make the array empty again */
-        xa_erase_index(&xa0, 0);
-        XA_BUG_ON(&xa0, !xa_empty(&xa0));
+        xa_erase_index(xa, base);
+        XA_BUG_ON(xa, !xa_empty(xa));
+
+        /* And it should assign %base again */
+        xa_alloc_index(xa, base, GFP_KERNEL);
+
+        /* Allocating and then erasing a lot should not lose base */
+        for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
+                xa_alloc_index(xa, i, GFP_KERNEL);
+        for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
+                xa_erase_index(xa, i);
+        xa_alloc_index(xa, base, GFP_KERNEL);
+
+        /* Destroying the array should do the same as erasing */
+        xa_destroy(xa);
 
-        /* And it should assign 0 again */
-        xa_alloc_index(&xa0, 0, GFP_KERNEL);
+        /* And it should assign %base again */
+        xa_alloc_index(xa, base, GFP_KERNEL);
 
-        /* The next assigned ID should be 1 */
-        xa_alloc_index(&xa0, 1, GFP_KERNEL);
-        xa_erase_index(&xa0, 1);
+        /* The next assigned ID should be base+1 */
+        xa_alloc_index(xa, base + 1, GFP_KERNEL);
+        xa_erase_index(xa, base + 1);
 
         /* Storing a value should mark it used */
-        xa_store_index(&xa0, 1, GFP_KERNEL);
-        xa_alloc_index(&xa0, 2, GFP_KERNEL);
+        xa_store_index(xa, base + 1, GFP_KERNEL);
+        xa_alloc_index(xa, base + 2, GFP_KERNEL);
 
-        /* If we then erase 0, it should be free */
-        xa_erase_index(&xa0, 0);
-        xa_alloc_index(&xa0, 0, GFP_KERNEL);
+        /* If we then erase base, it should be free */
+        xa_erase_index(xa, base);
+        xa_alloc_index(xa, base, GFP_KERNEL);
 
-        xa_erase_index(&xa0, 1);
-        xa_erase_index(&xa0, 2);
+        xa_erase_index(xa, base + 1);
+        xa_erase_index(xa, base + 2);
 
         for (i = 1; i < 5000; i++) {
-                xa_alloc_index(&xa0, i, GFP_KERNEL);
+                xa_alloc_index(xa, base + i, GFP_KERNEL);
         }
 
-        xa_destroy(&xa0);
+        xa_destroy(xa);
 
+        /* Check that we fail properly at the limit of allocation */
         id = 0xfffffffeU;
-        XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+        XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
                                 GFP_KERNEL) != 0);
-        XA_BUG_ON(&xa0, id != 0xfffffffeU);
-        XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+        XA_BUG_ON(xa, id != 0xfffffffeU);
+        XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
                                 GFP_KERNEL) != 0);
-        XA_BUG_ON(&xa0, id != 0xffffffffU);
-        XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+        XA_BUG_ON(xa, id != 0xffffffffU);
+        XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
                                 GFP_KERNEL) != -ENOSPC);
-        XA_BUG_ON(&xa0, id != 0xffffffffU);
-        xa_destroy(&xa0);
+        XA_BUG_ON(xa, id != 0xffffffffU);
+        xa_destroy(xa);
 
         id = 10;
-        XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+        XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
                                 GFP_KERNEL) != -ENOSPC);
-        XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
-        XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+        XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
+        XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
                                 GFP_KERNEL) != -ENOSPC);
-        xa_erase_index(&xa0, 3);
-        XA_BUG_ON(&xa0, !xa_empty(&xa0));
+        xa_erase_index(xa, 3);
+        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static DEFINE_XARRAY_ALLOC(xa0);
+static DEFINE_XARRAY_ALLOC1(xa1);
+
+static noinline void check_xa_alloc(void)
+{
+        check_xa_alloc_1(&xa0, 0);
+        check_xa_alloc_1(&xa1, 1);
+}
+
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
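As a quick summary (not part of the commit), the assertions above amount to the following behaviour for the 1s-based case; the array name and the WARN_ON()-style checks are illustrative, while the calls themselves are the ones the test exercises.

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(demo);

static void demo_alloc1_behaviour(void)
{
        u32 id = 0;

        WARN_ON(!xa_empty(&demo));      /* slot 0 is busy internally, yet the array reports empty */
        WARN_ON(xa_alloc(&demo, &id, UINT_MAX, xa_mk_value(42), GFP_KERNEL) != 0);
        WARN_ON(id != 1);               /* the first ID handed out is 1, never 0 */
        xa_erase(&demo, id);
        WARN_ON(!xa_empty(&demo));      /* erasing the only real entry empties the array again */
}

xa_destroy() resets a 1s-based array to the same state, as checked by the "Destroying the array should do the same as erasing" assertion above.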
lib/xarray.c (11 lines changed)
@@ -57,6 +57,11 @@ static inline bool xa_track_free(const struct xarray *xa)
         return xa->xa_flags & XA_FLAGS_TRACK_FREE;
 }
 
+static inline bool xa_zero_busy(const struct xarray *xa)
+{
+        return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
+}
+
 static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
 {
         if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
@@ -432,6 +437,8 @@ static void xas_shrink(struct xa_state *xas)
                         break;
                 if (!xa_is_node(entry) && node->shift)
                         break;
+                if (xa_is_zero(entry) && xa_zero_busy(xa))
+                        entry = NULL;
                 xas->xa_node = XAS_BOUNDS;
 
                 RCU_INIT_POINTER(xa->xa_head, entry);
@@ -628,6 +635,8 @@ static void *xas_create(struct xa_state *xas, bool allow_root)
         if (xas_top(node)) {
                 entry = xa_head_locked(xa);
                 xas->xa_node = NULL;
+                if (!entry && xa_zero_busy(xa))
+                        entry = XA_ZERO_ENTRY;
                 shift = xas_expand(xas, entry);
                 if (shift < 0)
                         return NULL;
@@ -1942,6 +1951,8 @@ void xa_destroy(struct xarray *xa)
         entry = xa_head_locked(xa);
         RCU_INIT_POINTER(xa->xa_head, NULL);
         xas_init_marks(&xas);
+        if (xa_zero_busy(xa))
+                xa_mark_clear(xa, XA_FREE_MARK);
         /* lockdep checks we're still holding the lock in xas_free_nodes() */
         if (xa_is_node(entry))
                 xas_free_nodes(&xas, xa_to_node(entry));