locking/lockdep: Mark local_lock_t

[ Upstream commit dfd5e3f5fe27bda91d5cc028c86ffbb7a0614489 ]

The local_lock_t's are special because they cannot form IRQ
inversions; make sure we can tell them apart from the rest of the
locks.
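
For illustration only (not part of this patch; the struct and function
names below are hypothetical, and the usual <linux/local_lock.h> API is
assumed), a minimal sketch of how a local_lock_t is typically declared
and used. With this change, both the static LL_DEP_MAP_INIT()
initializer and __local_lock_init() mark the lock's dep_map as
LD_LOCK_PERCPU, so lockdep can tell such per-CPU locks apart from
normal locks:

/* Illustrative sketch only -- hypothetical names. */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_pcpu_data {
	local_lock_t	lock;	/* dep_map now carries .lock_type = LD_LOCK_PERCPU */
	int		count;
};

static DEFINE_PER_CPU(struct my_pcpu_data, my_data) = {
	.lock = INIT_LOCAL_LOCK(lock),	/* uses LL_DEP_MAP_INIT() from the hunk below */
};

static void my_count_inc(void)
{
	local_lock(&my_data.lock);	/* acquire this CPU's lock */
	this_cpu_inc(my_data.count);
	local_unlock(&my_data.lock);
}

Because such a lock only protects CPU-local data, it cannot form the
IRQ inversions lockdep tracks for ordinary locks, which is what the new
lock_type lets lockdep recognise.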

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>

--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h

@@ -18,6 +18,7 @@ typedef struct {
 	.dep_map = {					\
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
+		.lock_type = LD_LOCK_PERCPU,		\
 	}
 #else
 # define LL_DEP_MAP_INIT(lockname)
@@ -30,7 +31,9 @@ do {								\
 	static struct lock_class_key __key;			\
 								\
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
-	lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
+	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
+			      LD_WAIT_CONFIG, LD_WAIT_INV,	\
+			      LD_LOCK_PERCPU);			\
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h

@@ -188,12 +188,19 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
  * to lockdep:
  */
 
-extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
-	struct lock_class_key *key, int subclass, short inner, short outer);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
+
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+	lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL);
+}
 
 static inline void
 lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key, int subclass, short inner)
+		      struct lock_class_key *key, int subclass, u8 inner)
 {
 	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
 }
@@ -347,6 +354,8 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+		do { (void)(name); (void)(key); } while (0)
 # define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
 		do { (void)(name); (void)(key); } while (0)
 # define lockdep_init_map_wait(lock, name, key, sub, inner) \

--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h

@@ -30,6 +30,12 @@ enum lockdep_wait_type {
 	LD_WAIT_MAX,		/* must be last */
 };
 
+enum lockdep_lock_type {
+	LD_LOCK_NORMAL = 0,	/* normal, catch all */
+	LD_LOCK_PERCPU,		/* percpu */
+	LD_LOCK_MAX,
+};
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -119,8 +125,10 @@ struct lock_class {
 	int				name_version;
 	const char			*name;
 
-	short				wait_type_inner;
-	short				wait_type_outer;
+	u8				wait_type_inner;
+	u8				wait_type_outer;
+	u8				lock_type;
+	/* u8				hole; */
 
 #ifdef CONFIG_LOCK_STAT
 	unsigned long			contention_point[LOCKSTAT_POINTS];
@@ -169,8 +177,10 @@ struct lockdep_map {
 	struct lock_class_key		*key;
 	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
 	const char			*name;
-	short				wait_type_outer; /* can be taken in this context */
-	short				wait_type_inner; /* presents this context */
+	u8				wait_type_outer; /* can be taken in this context */
+	u8				wait_type_inner; /* presents this context */
+	u8				lock_type;
+	/* u8				hole; */
 #ifdef CONFIG_LOCK_STAT
 	int				cpu;
 	unsigned long			ip;

--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c

@@ -1304,6 +1304,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	class->name_version = count_matching_names(class);
 	class->wait_type_inner = lock->wait_type_inner;
 	class->wait_type_outer = lock->wait_type_outer;
+	class->lock_type = lock->lock_type;
 	/*
 	 * We use RCU's safe list-add method to make
 	 * parallel walking of the hash-list safe:
@@ -4632,9 +4633,9 @@ print_lock_invalid_wait_context(struct task_struct *curr,
  */
 static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 {
-	short next_inner = hlock_class(next)->wait_type_inner;
-	short next_outer = hlock_class(next)->wait_type_outer;
-	short curr_inner;
+	u8 next_inner = hlock_class(next)->wait_type_inner;
+	u8 next_outer = hlock_class(next)->wait_type_outer;
+	u8 curr_inner;
 	int depth;
 
 	if (!next_inner || next->trylock)
@@ -4657,7 +4658,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
-		short prev_inner = hlock_class(prev)->wait_type_inner;
+		u8 prev_inner = hlock_class(prev)->wait_type_inner;
 
 		if (prev_inner) {
 			/*
@@ -4706,9 +4707,9 @@ static inline int check_wait_context(struct task_struct *curr,
 /*
  * Initialize a lock instance's lock-class mapping info:
  */
-void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
 			    struct lock_class_key *key, int subclass,
-			    short inner, short outer)
+			    u8 inner, u8 outer, u8 lock_type)
 {
 	int i;
 
@@ -4731,6 +4732,7 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
 	lock->wait_type_outer = outer;
 	lock->wait_type_inner = inner;
+	lock->lock_type = lock_type;
 
 	/*
 	 * No key, no joy, we need to hash something.
@@ -4765,7 +4767,7 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
 		raw_local_irq_restore(flags);
 	}
 }
-EXPORT_SYMBOL_GPL(lockdep_init_map_waits);
+EXPORT_SYMBOL_GPL(lockdep_init_map_type);
 
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);