drm/i915/gvt: Use sched_lock to protect gvt scheduler logic.
The scheduler lock (gvt->sched_lock) is used to protect the gvt scheduler logic, including the gvt scheduler structure (gvt->scheduler) and the per-vgpu schedule data (vgpu->sched_data, vgpu->sched_ctl).

v9:
- Change commit author since the patches are improved a lot compared with
  the original version.
  Original author: Pei Zhang <pei.zhang@intel.com>
- Rebase to latest gvt-staging.
v8:
- Correct coding style.
- Rebase to latest gvt-staging.
v7:
- Remove gtt_lock since it is already protected by gvt_lock and vgpu_lock.
v6:
- Rebase to latest gvt-staging.
v5:
- Rebase to latest gvt-staging.
v4:
- Rebase to latest gvt-staging.
v3: update to latest code base

Signed-off-by: Pei Zhang <pei.zhang@intel.com>
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
commit 9a512e23f1 (parent f25a49ab8a)
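In short, everything reachable through gvt->scheduler, plus the per-vGPU sched_data and sched_ctl fields, is now touched only with gvt->sched_lock held; where per-vGPU state is involved as well (as in complete_current_workload() below), vgpu_lock is taken first and sched_lock nested inside it, while the pure scheduler paths (intel_gvt_schedule(), pick_next_workload()) drop the device-wide gvt->lock in favour of sched_lock alone. The following is a minimal user-space sketch of that nesting, using pthread mutexes in place of the kernel's struct mutex; struct vgpu, struct gvt, gvt_schedule(), complete_workload() and the int sched_data field are simplified stand-ins for illustration, not the driver's real types.

/* Illustrative sketch only: pthread mutexes stand in for the kernel's
 * struct mutex, and the types below are simplified, not the real GVT
 * structures from gvt.h.
 */
#include <pthread.h>
#include <stdio.h>

struct vgpu {
        pthread_mutex_t vgpu_lock;   /* per-vGPU state lock */
        int sched_data;              /* protected by gvt.sched_lock, not vgpu_lock */
};

struct gvt {
        pthread_mutex_t sched_lock;  /* scheduler-scope lock */
        struct vgpu *current_vgpu;   /* part of the scheduler state */
};

/* Mirrors intel_gvt_schedule(): scheduler-only work takes sched_lock alone. */
static void gvt_schedule(struct gvt *gvt)
{
        pthread_mutex_lock(&gvt->sched_lock);
        if (gvt->current_vgpu)
                gvt->current_vgpu->sched_data++;   /* timeslice update, next pick, ... */
        pthread_mutex_unlock(&gvt->sched_lock);
}

/* Mirrors complete_current_workload(): when per-vGPU state is also touched,
 * vgpu_lock is taken first and sched_lock is nested inside it.
 */
static void complete_workload(struct gvt *gvt, struct vgpu *vgpu)
{
        pthread_mutex_lock(&vgpu->vgpu_lock);
        pthread_mutex_lock(&gvt->sched_lock);

        vgpu->sched_data++;                        /* scheduler bookkeeping */

        pthread_mutex_unlock(&gvt->sched_lock);
        pthread_mutex_unlock(&vgpu->vgpu_lock);
}

int main(void)
{
        struct vgpu vgpu = { .vgpu_lock = PTHREAD_MUTEX_INITIALIZER, .sched_data = 0 };
        struct gvt gvt = { .sched_lock = PTHREAD_MUTEX_INITIALIZER, .current_vgpu = &vgpu };

        gvt_schedule(&gvt);
        complete_workload(&gvt, &vgpu);
        printf("sched_data = %d\n", vgpu.sched_data);
        return 0;
}

The practical effect, visible in the hunks below, is that the timer-driven scheduler path no longer contends on the device-wide gvt->lock at all.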
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -376,6 +376,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
         idr_init(&gvt->vgpu_idr);
         spin_lock_init(&gvt->scheduler.mmio_context_lock);
         mutex_init(&gvt->lock);
+        mutex_init(&gvt->sched_lock);
         gvt->dev_priv = dev_priv;
 
         init_device_info(gvt);
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -177,6 +177,11 @@ struct intel_vgpu {
         bool pv_notified;
         bool failsafe;
         unsigned int resetting_eng;
+
+        /* Both sched_data and sched_ctl can be seen a part of the global gvt
+         * scheduler structure. So below 2 vgpu data are protected
+         * by sched_lock, not vgpu_lock.
+         */
         void *sched_data;
         struct vgpu_sched_ctl sched_ctl;
 
@@ -299,6 +304,9 @@ struct intel_gvt {
          * not yet protected by special locks(vgpu and scheduler lock).
          */
         struct mutex lock;
+        /* scheduler scope lock, protect gvt and vgpu schedule related data */
+        struct mutex sched_lock;
+
         struct drm_i915_private *dev_priv;
         struct idr vgpu_idr;        /* vGPU IDR pool */
 
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
         struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
         ktime_t cur_time;
 
-        mutex_lock(&gvt->lock);
+        mutex_lock(&gvt->sched_lock);
         cur_time = ktime_get();
 
         if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
         vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
         tbs_sched_func(sched_data);
 
-        mutex_unlock(&gvt->lock);
+        mutex_unlock(&gvt->sched_lock);
 }
 
 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
 
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
 {
-        gvt->scheduler.sched_ops = &tbs_schedule_ops;
+        int ret;
 
-        return gvt->scheduler.sched_ops->init(gvt);
+        mutex_lock(&gvt->sched_lock);
+        gvt->scheduler.sched_ops = &tbs_schedule_ops;
+        ret = gvt->scheduler.sched_ops->init(gvt);
+        mutex_unlock(&gvt->sched_lock);
+
+        return ret;
 }
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
 {
+        mutex_lock(&gvt->sched_lock);
         gvt->scheduler.sched_ops->clean(gvt);
+        mutex_unlock(&gvt->sched_lock);
 }
 
+/* for per-vgpu scheduler policy, there are 2 per-vgpu data:
+ * sched_data, and sched_ctl. We see these 2 data as part of
+ * the global scheduler which are proteced by gvt->sched_lock.
+ * Caller should make their decision if the vgpu_lock should
+ * be hold outside.
+ */
+
 int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
 {
-        return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+        int ret;
+
+        mutex_lock(&vgpu->gvt->sched_lock);
+        ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+        mutex_unlock(&vgpu->gvt->sched_lock);
+
+        return ret;
 }
 
 void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 {
+        mutex_lock(&vgpu->gvt->sched_lock);
         vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+        mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
         struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
+        mutex_lock(&vgpu->gvt->sched_lock);
         if (!vgpu_data->active) {
                 gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
                 vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
         }
+        mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
 {
+        mutex_lock(&gvt->sched_lock);
         intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+        mutex_unlock(&gvt->sched_lock);
 }
 
 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 
         gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
+        mutex_lock(&vgpu->gvt->sched_lock);
         scheduler->sched_ops->stop_schedule(vgpu);
 
         if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                 }
         }
         spin_unlock_bh(&scheduler->mmio_context_lock);
+        mutex_unlock(&vgpu->gvt->sched_lock);
 }
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -715,7 +715,7 @@ static struct intel_vgpu_workload *pick_next_workload(
         struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
         struct intel_vgpu_workload *workload = NULL;
 
-        mutex_lock(&gvt->lock);
+        mutex_lock(&gvt->sched_lock);
 
         /*
          * no current vgpu / will be scheduled out / no workload
@@ -761,7 +761,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 
         atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
-        mutex_unlock(&gvt->lock);
+        mutex_unlock(&gvt->sched_lock);
         return workload;
 }
 
@@ -862,8 +862,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
         struct intel_vgpu_submission *s = &vgpu->submission;
         int event;
 
-        mutex_lock(&gvt->lock);
         mutex_lock(&vgpu->vgpu_lock);
+        mutex_lock(&gvt->sched_lock);
 
         /* For the workload w/ request, needs to wait for the context
          * switch to make sure request is completed.
@@ -941,8 +941,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
         if (gvt->scheduler.need_reschedule)
                 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
 
+        mutex_unlock(&gvt->sched_lock);
         mutex_unlock(&vgpu->vgpu_lock);
-        mutex_unlock(&gvt->lock);
 }
 
 struct workload_thread_param {