#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H

#include <linux/list.h>
#include <linux/bit_spinlock.h>

/*
 * Special version of lists, where the head of the list keeps a lock in
 * its lowest bit. This is useful for scalable hash tables without
 * increasing memory footprint overhead.
 *
 * For modification operations, bit 0 of the hlist_bl_head->first
 * pointer must be set (i.e. the head must be locked).
 *
 * With some small modifications, this can easily be adapted to store
 * several arbitrary bits (not just a single lock bit), if the need
 * arises to store some fast and compact auxiliary data.
 */
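/*
 * For example (an illustrative sketch, not part of this header; the
 * `example_table` name is hypothetical), a hash table with one lock per
 * bucket costs exactly one pointer per bucket, because the lock lives
 * in the low bit of the head pointer itself:
 *
 *	static struct hlist_bl_head example_table[1 << 10];
 *
 * A separate spinlock_t next to each hlist_head would at least double
 * the footprint.
 */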

/*
 * The #if below must match the conditions under which bit_spin_lock()
 * really takes the bit: bit 0 of ->first is set not only on CONFIG_SMP
 * but also on CONFIG_DEBUG_SPINLOCK builds. If LIST_BL_LOCKMASK were
 * 0UL in the !SMP && DEBUG_SPINLOCK case, hlist_bl_first() would leave
 * the lock bit set and callers would dereference an almost-NULL
 * pointer (e.g. __d_rehash() faulting on address 0x00000005).
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL
#else
#define LIST_BL_LOCKMASK	0UL
#endif
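/*
 * Worked example of the pointer tagging (assumed addresses, for
 * illustration only): nodes are word-aligned, so bit 0 of a node
 * address is always free. With a first node at 0x2000, a locked head
 * stores 0x2000 | LIST_BL_LOCKMASK == 0x2001, and hlist_bl_first()
 * below recovers the real pointer: 0x2001 & ~1UL == 0x2000.
 */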

#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif

struct hlist_bl_head {
	struct hlist_bl_node *first;
};

struct hlist_bl_node {
	struct hlist_bl_node *next, **pprev;
};
#define INIT_HLIST_BL_HEAD(ptr) \
	((ptr)->first = NULL)

static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)

static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
{
	return !h->pprev;
}

static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_set_first(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}
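/*
 * Concretely (illustrative values): with LIST_BL_LOCKMASK == 1UL, the
 * first LIST_BL_BUG_ON() rejects a misaligned or already-tagged node
 * such as n == 0x2001, and the second fires unless bit 0 of h->first
 * is set, i.e. unless the caller already holds the bucket lock. So a
 * correct caller (hypothetical sketch) looks like:
 *
 *	hlist_bl_lock(b);
 *	hlist_bl_set_first(b, n);
 *	hlist_bl_unlock(b);
 */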

static inline int hlist_bl_empty(const struct hlist_bl_head *h)
{
	return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_add_head(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;
	hlist_bl_set_first(h, n);
}
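/*
 * Example usage (an illustrative sketch; `struct example_entry` and
 * `example_hash_add` are hypothetical): hlist_bl_add_head() does not
 * take the lock itself, so the caller holds it across the insertion.
 *
 *	struct example_entry {
 *		int key;
 *		struct hlist_bl_node node;
 *	};
 *
 *	static void example_hash_add(struct hlist_bl_head *b,
 *				     struct example_entry *e)
 *	{
 *		hlist_bl_lock(b);
 *		hlist_bl_add_head(&e->node, b);
 *		hlist_bl_unlock(b);
 *	}
 */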

static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
	struct hlist_bl_node *next = n->next;
	struct hlist_bl_node **pprev = n->pprev;

	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);

	/* pprev may be `first`, so be careful not to lose the lock bit */
	WRITE_ONCE(*pprev,
		   (struct hlist_bl_node *)
			((unsigned long)next |
			 ((unsigned long)*pprev & LIST_BL_LOCKMASK)));
	if (next)
		next->pprev = pprev;
}
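/*
 * Worked example of the lock-bit handling above (assumed addresses,
 * for illustration only): when deleting the first node of a locked
 * bucket, n->pprev == &h->first, so *pprev carries the lock bit. With
 * next == 0x3000 and *pprev == 0x2001, the store writes
 * 0x3000 | (0x2001 & 1UL) == 0x3001, keeping the bucket locked; a
 * plain *pprev = next would silently drop the lock.
 */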

static inline void hlist_bl_del(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
	if (!hlist_bl_unhashed(n)) {
		__hlist_bl_del(n);
		INIT_HLIST_BL_NODE(n);
	}
}

static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	bit_spin_lock(0, (unsigned long *)b);
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
	__bit_spin_unlock(0, (unsigned long *)b);
}

static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
{
	return bit_spin_is_locked(0, (unsigned long *)b);
}
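/*
 * Example (an illustrative sketch; `example_unlink_hashed` is
 * hypothetical): hlist_bl_is_locked() suits assertions in helpers that
 * require the caller to hold the bucket lock.
 *
 *	static void example_unlink_hashed(struct hlist_bl_head *b,
 *					  struct hlist_bl_node *n)
 *	{
 *		WARN_ON_ONCE(!hlist_bl_is_locked(b));
 *		hlist_bl_del_init(n);
 *	}
 */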

/**
 * hlist_bl_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_bl_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member) \
	for (pos = hlist_bl_first(head); \
	     pos && \
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
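/*
 * Example lookup under the bucket lock (an illustrative sketch;
 * `example_find` is hypothetical, and `struct example_entry` is a
 * hypothetical type with an `int key` and a `struct hlist_bl_node
 * node` member):
 *
 *	static struct example_entry *example_find(struct hlist_bl_head *b,
 *						  int key)
 *	{
 *		struct example_entry *e;
 *		struct hlist_bl_node *pos;
 *
 *		hlist_bl_for_each_entry(e, pos, b, node)
 *			if (e->key == key)
 *				return e;
 *		return NULL;
 *	}
 */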

/**
 * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_bl_node to use as a loop cursor.
 * @n: another &struct hlist_bl_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = hlist_bl_first(head); \
	     pos && ({ n = pos->next; 1; }) && \
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
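/*
 * Example of emptying a bucket with the _safe variant (an illustrative
 * sketch; `example_flush` is hypothetical and reuses the hypothetical
 * `struct example_entry` from above). `n` caches pos->next before the
 * entry is unlinked, so deletion during iteration is safe:
 *
 *	static void example_flush(struct hlist_bl_head *b)
 *	{
 *		struct example_entry *e;
 *		struct hlist_bl_node *pos, *n;
 *
 *		hlist_bl_lock(b);
 *		hlist_bl_for_each_entry_safe(e, pos, n, b, node)
 *			hlist_bl_del(&e->node);
 *		hlist_bl_unlock(b);
 *	}
 */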

#endif