/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job) \
		container_of((sched_job), struct drm_sched_job, queue_node)

static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

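/* Add an entity to the run queue's entity list, unless it is queued already */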
static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
				    struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

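/* Remove an entity from the run queue; also clear rq->current_entity if it
 * pointed at this entity.
 */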
static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				       struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq: The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched: The pointer to the scheduler
 * @entity: The pointer to a valid drm_sched_entity
 * @rq: The run queue this entity belongs to
 * @jobs: The max number of jobs in the job queue
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Return 0 on success, negative error code on failure.
 */
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity,
			  struct drm_sched_rq *rq,
			  uint32_t jobs, atomic_t *guilty)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;
	entity->guilty = guilty;

	spin_lock_init(&entity->rq_lock);
	spin_lock_init(&entity->queue_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

/**
 * Query if entity is initialized
 *
 * @sched: Pointer to scheduler instance
 * @entity: The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
					    struct drm_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity: The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb();
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity: The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched: Pointer to scheduler instance
 * @entity: The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
			   struct drm_sched_entity *entity)
{
	int r;

	if (!drm_sched_entity_is_initialized(sched, entity))
		return;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
		r = -ERESTARTSYS;
	else
		r = wait_event_killable(sched->job_scheduled,
					drm_sched_entity_is_idle(entity));
	drm_sched_entity_set_rq(entity, NULL);
	if (r) {
		struct drm_sched_job *job;

		/* Park the kernel thread for a moment to make sure it isn't
		 * processing our entity.
		 */
		kthread_park(sched->thread);
		kthread_unpark(sched->thread);
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
			struct drm_sched_fence *s_fence = job->s_fence;
			drm_sched_fence_scheduled(s_fence);
			dma_fence_set_error(&s_fence->finished, -ESRCH);
			drm_sched_fence_finished(s_fence);
			WARN_ON(s_fence->parent);
			dma_fence_put(&s_fence->finished);
			sched->ops->free_job(job);
		}
	}
}
EXPORT_SYMBOL(drm_sched_entity_fini);

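/* Dependency fence callback: clear the dependency and wake up the scheduler */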
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup(entity->sched);
}

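/* Dependency fence callback: just clear the dependency */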
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);
	entity->dependency = NULL;
	dma_fence_put(f);
}

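/* Move an entity to a different run queue, or take it off any run queue when
 * rq is NULL.
 */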
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
			     struct drm_sched_rq *rq)
{
	if (entity->rq == rq)
		return;

	spin_lock(&entity->rq_lock);

	if (entity->rq)
		drm_sched_rq_remove_entity(entity->rq, entity);

	entity->rq = rq;
	if (rq)
		drm_sched_rq_add_entity(rq, entity);

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);

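/* Check whether a dependency on @fence can be optimized: true if the fence
 * comes from @entity's own context or is a scheduler fence belonging to the
 * same scheduler.
 */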
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

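/* Install a callback on the entity's current dependency fence; returns true
 * if the entity has to wait for the callback, false if the dependency could
 * be resolved right away.
 */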
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

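/* Return the job at the head of the entity's queue once all of its
 * dependencies are resolved, or NULL if it still has to wait.
 */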
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->sched;
	struct drm_sched_job *sched_job = to_drm_sched_job(
		spsc_queue_peek(&entity->job_queue));

	if (!sched_job)
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;

	/* skip jobs from an entity that is marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job: The pointer to the job required to submit
 *
 * The job is appended to the entity's job queue; the first queued job adds
 * the entity to the run queue and wakes up the scheduler.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;
	bool first = false;

	trace_drm_sched_job(sched_job, entity);

	spin_lock(&entity->queue_lock);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);

/* job_finish is called after the hw fence signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
						   finish_work);
	struct drm_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct drm_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct drm_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	dma_fence_put(&s_job->s_fence->finished);
	sched->ops->free_job(s_job);
}

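/* Finished fence callback: push the remaining job cleanup to process context */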
static void drm_sched_job_finish_cb(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

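/* Add the job to the ring mirror list and arm the timeout handler if it is
 * the first job in flight.
 */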
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
			       drm_sched_job_finish_cb);

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct drm_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

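/* Timeout worker: let the driver handle the job that exceeded its timeout */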
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

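/* Unhook all in-flight jobs from their hardware fences before a GPU reset
 * and, when @bad has hung more often than the hang limit, mark the owning
 * entity as guilty.
 */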
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *entity, *tmp;
	int i;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->s_fence->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		}
	}
	spin_unlock(&sched->job_list_lock);

	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);
		/* don't increase @bad's karma if it's from the KERNEL RQ,
		 * because sometimes a GPU hang corrupts kernel jobs (like VM
		 * updating jobs); keep in mind that kernel jobs are always
		 * considered good.
		 */
		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context == entity->fence_context) {
					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
						if (entity->guilty)
							atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);

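/* Resubmit the jobs on the mirror list after a GPU reset; jobs from a
 * context found guilty get their finished fence marked with -ECANCELED.
 */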
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	bool found_guilty = false;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct drm_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *fence;
		uint64_t guilty_context;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_job_recovery);

/* init a sched_job with basic fields */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_gpu_scheduler *sched,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_WORK(&job->finish_work, drm_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * Return true if we can push more jobs to the hw.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

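/* Hardware fence callback: account the finished job and wake up the worker */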
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_fence *s_fence =
		container_of(cb, struct drm_sched_fence, cb);
	struct drm_gpu_scheduler *sched = s_fence->sched;

	dma_fence_get(&s_fence->finished);
	atomic_dec(&sched->hw_rq_count);
	drm_sched_fence_finished(s_fence);

	trace_drm_sched_process_job(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

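/* Park the scheduler thread if requested; returns true if it was parked */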
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

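/* Main loop of the scheduler thread: wait for a ready entity, pop a job from
 * it, run the job on the hardware and install the completion callback.
 */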
static int drm_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (fence) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &s_fence->cb,
						   drm_sched_process_job);
			if (r == -ENOENT)
				drm_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			drm_sched_process_job(NULL, &s_fence->cb);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched: The pointer to the scheduler
 * @ops: The backend operations for this scheduler.
 * @hw_submission: Number of hw submissions that can be in flight.
 * @hang_limit: Karma threshold above which a hanging job's entity is
 *              marked guilty.
 * @timeout: Timeout in jiffies for each job, or MAX_SCHEDULE_TIMEOUT
 *           to disable job timeouts.
 * @name: Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission,
		   unsigned hang_limit,
		   long timeout,
		   const char *name)
{
	int i;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
		drm_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * Destroy a gpu scheduler
 *
 * @sched: The pointer to the scheduler
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
}
EXPORT_SYMBOL(drm_sched_fini);