mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-26 00:30:53 +07:00
8161239a8b
In the current rtmutex, the pending owner may be boosted by tasks on the rtmutex's waitlist when the pending owner is deboosted or a task on the waitlist is boosted. This boosting is unrelated, because the pending owner does not really hold the rtmutex; it is not reasonable. Example. time1: A (high prio) owns the rtmutex; B (mid prio) and C (low prio) are on the waitlist. time2: A releases the lock and B becomes the pending owner; A (or another high-prio task) continues to run. B's prio is lower than A's, so B is just queued on the runqueue. time3: A or the other high-prio task sleeps, but some time has passed, and B's and C's prios were changed in the period (time2 ~ time3) due to boosting or deboosting. Now C has higher priority than B. ***Is it reasonable that C has to boost B and help B get the rtmutex? NO!! I think it is unrelated/unneeded boosting before B really owns the rtmutex. We should give C a chance to beat B and win the rtmutex. This is the motivation of this patch. This patch *ensures* that only the top waiter or a higher-priority task can take the lock. How? 1) We don't dequeue the top waiter on unlock; if the top waiter changes, the old top waiter will fail and go back to sleep. 2) When acquiring the lock, a task gets the lock when the lock is not taken and: there is no waiter, OR it has higher priority than the waiters, OR it is the top waiter. 3) Whenever the top waiter changes, the new top waiter is woken up. The algorithm is much simpler than before: no pending owner, no boosting for a pending owner. Other advantages of this patch: 1) The states of an rtmutex are reduced by half, making the code easier to read. 2) The code becomes shorter. 3) The top waiter is not dequeued until it really takes the lock, so waiters retain FIFO order when the lock is stolen. Neither advantage nor disadvantage: 1) Even though we may wake up multiple waiters (whenever the top waiter changes), we hardly cause a "thundering herd"; the number of woken tasks is likely 1 or very small. 2) Two APIs are changed. 
rt_mutex_owner() will not return a pending owner; it will return NULL when the top waiter is about to take the lock. rt_mutex_next_owner() always returns the top waiter; it will not return NULL if we have waiters, because the top waiter is not dequeued. I have fixed the code that uses these APIs. The following need to be updated after this patch is accepted: 1) Documentation/* 2) the testcase scripts/rt-tester/t4-l2-pi-deboost.tst Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> LKML-Reference: <4D3012D5.4060709@cn.fujitsu.com> Reviewed-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
127 lines
3.3 KiB
C
127 lines
3.3 KiB
C
/*
|
|
* RT Mutexes: blocking mutual exclusion locks with PI support
|
|
*
|
|
* started by Ingo Molnar and Thomas Gleixner:
|
|
*
|
|
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
|
|
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
|
|
*
|
|
* This file contains the private data structure and API definitions.
|
|
*/
|
|
|
|
#ifndef __KERNEL_RTMUTEX_COMMON_H
|
|
#define __KERNEL_RTMUTEX_COMMON_H
|
|
|
|
#include <linux/rtmutex.h>
|
|
|
|
/*
 * The rtmutex in kernel tester is independent of rtmutex debugging. We
 * call schedule_rt_mutex_test() instead of schedule() for the tasks which
 * belong to the tester. That way we can delay the wakeup path of those
 * threads to provoke lock stealing and testing of complex boosting scenarios.
 */
#ifdef CONFIG_RT_MUTEX_TESTER

extern void schedule_rt_mutex_test(struct rt_mutex *lock);

/*
 * Block the current task on @_lock's behalf. Regular tasks go through
 * schedule(); tasks flagged PF_MUTEX_TESTER are diverted to the tester
 * hook so their wakeup can be delayed deliberately (see comment above).
 */
#define schedule_rt_mutex(_lock)				\
  do {							\
	if (!(current->flags & PF_MUTEX_TESTER))	\
		schedule();				\
	else						\
		schedule_rt_mutex_test(_lock);		\
  } while (0)

#else
# define schedule_rt_mutex(_lock)			schedule()
#endif
|
|
|
|
/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @list_entry:		pi node to enqueue into the mutex waiters list
 * @pi_list_entry:	pi node to enqueue into the mutex owner waiters list
 * @task:		task reference to the blocked task
 * @lock:		the rt_mutex this task is blocked on
 */
struct rt_mutex_waiter {
	struct plist_node list_entry;
	struct plist_node pi_list_entry;
	struct task_struct *task;
	struct rt_mutex *lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
	/* Extra state kept only for deadlock-detection debug output: */
	unsigned long ip;
	struct pid *deadlock_task_pid;
	struct rt_mutex *deadlock_lock;
#endif
};
|
|
|
|
/*
|
|
* Various helpers to access the waiters-plist:
|
|
*/
|
|
static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
|
|
{
|
|
return !plist_head_empty(&lock->wait_list);
|
|
}
|
|
|
|
static inline struct rt_mutex_waiter *
|
|
rt_mutex_top_waiter(struct rt_mutex *lock)
|
|
{
|
|
struct rt_mutex_waiter *w;
|
|
|
|
w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
|
|
list_entry);
|
|
BUG_ON(w->lock != lock);
|
|
|
|
return w;
|
|
}
|
|
|
|
static inline int task_has_pi_waiters(struct task_struct *p)
|
|
{
|
|
return !plist_head_empty(&p->pi_waiters);
|
|
}
|
|
|
|
static inline struct rt_mutex_waiter *
|
|
task_top_pi_waiter(struct task_struct *p)
|
|
{
|
|
return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
|
|
pi_list_entry);
|
|
}
|
|
|
|
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the owning task_struct pointer with its low bit
 * used as the "has waiters" flag, so owner and flag can be read and
 * updated together. Currently only bit 0 is used, hence the mask
 * covering "all" state bits equals RT_MUTEX_HAS_WAITERS.
 */
#define RT_MUTEX_HAS_WAITERS	1UL
#define RT_MUTEX_OWNER_MASKALL	1UL

/*
 * Strip the state bit(s) from lock->owner and return the owning task
 * (NULL when no task owns the lock).
 */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
	return (struct task_struct *)
		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
}
|
|
|
|
/*
 * PI-futex support (proxy locking functions, etc.):
 */

/*
 * Return the top waiter on @lock; per the no-pending-owner design this
 * never returns NULL while waiters exist, because the top waiter is not
 * dequeued until it actually takes the lock.
 */
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
/* Initialize @lock as already owned by @proxy_owner (futex requeue-pi). */
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				       struct task_struct *proxy_owner);
/* Undo rt_mutex_init_proxy_locked(), releasing @lock from @proxy_owner. */
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
				  struct task_struct *proxy_owner);
/* Start acquiring @lock on behalf of @task, enqueueing @waiter for it. */
extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task,
				     int detect_deadlock);
/*
 * Complete an acquisition started by rt_mutex_start_proxy_lock(),
 * optionally bounded by hrtimer sleeper @to.
 */
extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
				      struct hrtimer_sleeper *to,
				      struct rt_mutex_waiter *waiter,
				      int detect_deadlock);
|
|
|
|
#ifdef CONFIG_DEBUG_RT_MUTEXES
|
|
# include "rtmutex-debug.h"
|
|
#else
|
|
# include "rtmutex.h"
|
|
#endif
|
|
|
|
#endif
|