mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-15 23:36:40 +07:00

49d0de082c

Abhi noticed that we were getting a complaint from the RCU subsystem
about access of an RCU protected list under the write side bit lock.
This commit adds additional annotation to check both the RCU read lock
and the write side bit lock before printing a message.

Reported-by: Abhijith Das <adas@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Tested-by: Abhijith Das <adas@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
162 lines
4.1 KiB
C
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H

#include <linux/list.h>
#include <linux/bit_spinlock.h>

/*
 * Special version of lists, where the head of the list has a lock in the
 * lowest bit. This is useful for scalable hash tables without increasing
 * memory footprint overhead.
 *
 * For modification operations, the 0 bit of hlist_bl_head->first
 * pointer must be set.
 *
 * With some small modifications, this can easily be adapted to store several
 * arbitrary bits (not just a single lock bit), if the need arises to store
 * some fast and compact auxiliary data.
 */

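/*
 * Illustrative sketch of the intended use: a hash-table bucket whose
 * only lock is the bit embedded in the head pointer. The names my_obj,
 * my_table and my_insert are hypothetical, not part of this API.
 *
 *	struct my_obj {
 *		int			key;
 *		struct hlist_bl_node	node;
 *	};
 *
 *	static struct hlist_bl_head my_table[64];
 *
 *	static void my_insert(struct my_obj *obj)
 *	{
 *		struct hlist_bl_head *b = &my_table[obj->key & 63];
 *
 *		hlist_bl_lock(b);	(takes bit 0 of b->first)
 *		hlist_bl_add_head(&obj->node, b);
 *		hlist_bl_unlock(b);
 *	}
 */
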
/*
 * On UP kernels without spinlock debugging, bit_spin_lock() takes no
 * bit at all, so the lock mask collapses to zero.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL
#else
#define LIST_BL_LOCKMASK	0UL
#endif

#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif

struct hlist_bl_head {
	struct hlist_bl_node *first;
};

struct hlist_bl_node {
	struct hlist_bl_node *next, **pprev;
};

#define INIT_HLIST_BL_HEAD(ptr) \
	((ptr)->first = NULL)

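/*
 * Illustrative initialization (hypothetical my_table from the sketch
 * above); every head starts out NULL, i.e. empty and unlocked:
 *
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(my_table); i++)
 *		INIT_HLIST_BL_HEAD(&my_table[i]);
 */
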
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

#define hlist_bl_entry(ptr, type, member) container_of(ptr, type, member)

static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
{
	return !h->pprev;
}

/* Return the first node, with the lock bit masked off the pointer. */
static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

/*
 * Must be called with the bucket lock held: the second BUG_ON checks
 * that the lock bit is still set in the old head pointer.
 */
static inline void hlist_bl_set_first(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}

static inline int hlist_bl_empty(const struct hlist_bl_head *h)
{
	return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_add_head(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;
	hlist_bl_set_first(h, n);
}

static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
	struct hlist_bl_node *next = n->next;
	struct hlist_bl_node **pprev = n->pprev;

	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);

	/* pprev may be `first`, so be careful not to lose the lock bit */
	*pprev = (struct hlist_bl_node *)
			((unsigned long)next |
			 ((unsigned long)*pprev & LIST_BL_LOCKMASK));
	if (next)
		next->pprev = pprev;
}

static inline void hlist_bl_del(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	/* Poison the node so any stale user faults noisily. */
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
	if (!hlist_bl_unhashed(n)) {
		__hlist_bl_del(n);
		INIT_HLIST_BL_NODE(n);
	}
}

/*
 * The write-side lock is bit 0 of the head pointer itself, taken as a
 * bit spinlock.
 */
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	bit_spin_lock(0, (unsigned long *)b);
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
	__bit_spin_unlock(0, (unsigned long *)b);
}

static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
{
	return bit_spin_is_locked(0, (unsigned long *)b);
}

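/*
 * Sketch of the kind of annotation the commit message above describes
 * (hedged: the real check lives with the RCU variant of these lists,
 * and hlist_bl_traversal_safe is a hypothetical name). A traversal is
 * only legitimate if the caller holds either the RCU read lock or the
 * head's bit lock; rcu_read_lock_held() only reports accurately under
 * CONFIG_PROVE_RCU.
 *
 *	static inline bool hlist_bl_traversal_safe(struct hlist_bl_head *b)
 *	{
 *		return rcu_read_lock_held() || hlist_bl_is_locked(b);
 *	}
 */
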
/**
 * hlist_bl_for_each_entry - iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member)		\
	for (pos = hlist_bl_first(head);				\
	     pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = pos->next)

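/*
 * Illustrative traversal under the bit lock (hypothetical my_obj and
 * bucket b from the sketches above):
 *
 *	struct my_obj *obj;
 *	struct hlist_bl_node *pos;
 *
 *	hlist_bl_lock(b);
 *	hlist_bl_for_each_entry(obj, pos, b, node)
 *		pr_info("key %d\n", obj->key);
 *	hlist_bl_unlock(b);
 */
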
/**
 * hlist_bl_for_each_entry_safe - iterate over list of given type, safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @n:		another &struct hlist_bl_node to use as temporary storage.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member)	 \
	for (pos = hlist_bl_first(head);				 \
	     pos && ({ n = pos->next; 1; }) &&				 \
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = n)

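/*
 * Illustrative removal while walking (hypothetical names as above);
 * the safe variant caches pos->next in n before the loop body runs,
 * so the current entry may be deleted:
 *
 *	struct my_obj *obj;
 *	struct hlist_bl_node *pos, *n;
 *
 *	hlist_bl_lock(b);
 *	hlist_bl_for_each_entry_safe(obj, pos, n, b, node)
 *		if (obj->key == victim)
 *			hlist_bl_del(&obj->node);
 *	hlist_bl_unlock(b);
 */
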
#endif /* _LINUX_LIST_BL_H */