mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-20 07:26:49 +07:00
drm/i915: Extend intel_wakeref to support delayed puts
In some cases we want to hold onto the wakeref for a little after the last user so that we can avoid having to drop and then immediately reacquire it. Allow the last user to specify if they would like to keep the wakeref alive for a short hysteresis.

v2: Embrace bitfield.h for adjustable flags.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200323103221.14444-1-chris@chris-wilson.co.uk
This commit is contained in:
parent
45d4173994
commit
e9037e7f9a
@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
|
||||
intel_wakeref_put_async(&engine->wakeref);
|
||||
}
|
||||
|
||||
static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
|
||||
unsigned long delay)
|
||||
{
|
||||
intel_wakeref_put_delay(&engine->wakeref, delay);
|
||||
}
|
||||
|
||||
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
|
||||
{
|
||||
intel_wakeref_unlock_wait(&engine->wakeref);
|
||||
|
@ -38,7 +38,7 @@ static bool flush_submission(struct intel_gt *gt)
|
||||
for_each_engine(engine, gt, id) {
|
||||
intel_engine_flush_submission(engine);
|
||||
active |= flush_work(&engine->retire_work);
|
||||
active |= flush_work(&engine->wakeref.work);
|
||||
active |= flush_delayed_work(&engine->wakeref.work);
|
||||
}
|
||||
|
||||
return active;
|
||||
|
@ -70,11 +70,12 @@ static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
|
||||
|
||||
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
|
||||
{
|
||||
INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
|
||||
INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));
|
||||
|
||||
/* Assume we are not in process context and so cannot sleep. */
|
||||
if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
|
||||
schedule_work(&wf->work);
|
||||
mod_delayed_work(system_wq, &wf->work,
|
||||
FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -83,7 +84,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
|
||||
|
||||
static void __intel_wakeref_put_work(struct work_struct *wrk)
|
||||
{
|
||||
struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
|
||||
struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);
|
||||
|
||||
if (atomic_add_unless(&wf->count, -1, 1))
|
||||
return;
|
||||
@ -104,8 +105,9 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
|
||||
atomic_set(&wf->count, 0);
|
||||
wf->wakeref = 0;
|
||||
|
||||
INIT_WORK(&wf->work, __intel_wakeref_put_work);
|
||||
lockdep_init_map(&wf->work.lockdep_map, "wakeref.work", &key->work, 0);
|
||||
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
|
||||
lockdep_init_map(&wf->work.work.lockdep_map,
|
||||
"wakeref.work", &key->work, 0);
|
||||
}
|
||||
|
||||
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
|
||||
|
@ -8,6 +8,7 @@
|
||||
#define INTEL_WAKEREF_H
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/bits.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/mutex.h>
|
||||
@ -41,7 +42,7 @@ struct intel_wakeref {
|
||||
struct intel_runtime_pm *rpm;
|
||||
const struct intel_wakeref_ops *ops;
|
||||
|
||||
struct work_struct work;
|
||||
struct delayed_work work;
|
||||
};
|
||||
|
||||
struct intel_wakeref_lockclass {
|
||||
@ -117,6 +118,11 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
|
||||
return atomic_inc_not_zero(&wf->count);
|
||||
}
|
||||
|
||||
enum {
|
||||
INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
|
||||
__INTEL_WAKEREF_PUT_LAST_BIT__
|
||||
};
|
||||
|
||||
/**
|
||||
* intel_wakeref_put_flags: Release the wakeref
|
||||
* @wf: the wakeref
|
||||
@ -134,7 +140,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
|
||||
*/
|
||||
static inline void
|
||||
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
|
||||
#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
|
||||
#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
|
||||
#define INTEL_WAKEREF_PUT_DELAY \
|
||||
GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
|
||||
{
|
||||
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
|
||||
if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
|
||||
@ -154,6 +162,14 @@ intel_wakeref_put_async(struct intel_wakeref *wf)
|
||||
__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
|
||||
}
|
||||
|
||||
static inline void
|
||||
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
|
||||
{
|
||||
__intel_wakeref_put(wf,
|
||||
INTEL_WAKEREF_PUT_ASYNC |
|
||||
FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_wakeref_lock: Lock the wakeref (mutex)
|
||||
* @wf: the wakeref
|
||||
@ -194,7 +210,7 @@ intel_wakeref_unlock_wait(struct intel_wakeref *wf)
|
||||
{
|
||||
mutex_lock(&wf->mutex);
|
||||
mutex_unlock(&wf->mutex);
|
||||
flush_work(&wf->work);
|
||||
flush_delayed_work(&wf->work);
|
||||
}
|
||||
|
||||
/**
|
||||
|
Loading…
Reference in New Issue
Block a user