path: root/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
author	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-09-11 04:34:46 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-09-11 04:34:46 -0300
commit	863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree	d6d89a12e7eb8017837c057935a2271290907f76 /drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
parent	8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.c	121
1 file changed, 82 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a5ff9458d..c16248cee 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -319,6 +319,48 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
return added;
}
+static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
+ struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
+ schedule_work(&job->work_free_job);
+}
+
+/* job_finish is called after the hw fence is signaled, and
+ * the job has already been deleted from ring_mirror_list
+ */
+void amd_sched_job_finish(struct amd_sched_job *s_job)
+{
+ struct amd_sched_job *next;
+ struct amd_gpu_scheduler *sched = s_job->sched;
+
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+ if (cancel_delayed_work(&s_job->work_tdr))
+ amd_sched_job_put(s_job);
+
+ /* queue TDR for next job */
+ next = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node);
+
+ if (next) {
+ INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+ amd_sched_job_get(next);
+ schedule_delayed_work(&next->work_tdr, sched->timeout);
+ }
+ }
+}
+
+void amd_sched_job_begin(struct amd_sched_job *s_job)
+{
+ struct amd_gpu_scheduler *sched = s_job->sched;
+
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
+ {
+ INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
+ amd_sched_job_get(s_job);
+ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+ }
+}
+
/**
* Submit a job to the job queue
*
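
The hunk above wires up the new TDR handling: amd_sched_job_begin() arms a delayed work item for the job at the head of ring_mirror_list and takes a reference on it, and amd_sched_job_finish() cancels that work once the hardware fence has signaled and re-arms it for the next queued job. Below is a minimal standalone sketch of that delayed-work plus kref pattern, not part of the patch; the tdr_demo_* names are hypothetical and only the core workqueue/kref APIs are assumed.

/*
 * Illustrative sketch, not part of the patch: the delayed-work plus
 * refcount pattern used by amd_sched_job_begin()/amd_sched_job_finish().
 * All tdr_demo_* names are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct tdr_demo_job {
	struct kref		refcount;
	struct delayed_work	work_tdr;
};

static void tdr_demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct tdr_demo_job, refcount));
}

static void tdr_demo_timeout(struct work_struct *work)
{
	struct tdr_demo_job *job =
		container_of(work, struct tdr_demo_job, work_tdr.work);

	pr_err("tdr_demo: job timed out, recovery would start here\n");
	kref_put(&job->refcount, tdr_demo_release);	/* drop the TDR reference */
}

/* arm the timer: take a reference so the job outlives the pending work */
static void tdr_demo_begin(struct tdr_demo_job *job, long timeout)
{
	INIT_DELAYED_WORK(&job->work_tdr, tdr_demo_timeout);
	kref_get(&job->refcount);
	schedule_delayed_work(&job->work_tdr, timeout);
}

/* disarm on completion: only drop the reference if the work was still pending */
static void tdr_demo_finish(struct tdr_demo_job *job)
{
	if (cancel_delayed_work(&job->work_tdr))
		kref_put(&job->refcount, tdr_demo_release);
}
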
@@ -330,11 +372,39 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
+ sched_job->use_sched = 1;
+ fence_add_callback(&sched_job->s_fence->base,
+ &sched_job->cb_free_job, amd_sched_free_job);
trace_amd_sched_job(sched_job);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
+/* init a sched_job with the basic fields */
+int amd_sched_job_init(struct amd_sched_job *job,
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void (*timeout_cb)(struct work_struct *work),
+ void (*free_cb)(struct kref *refcount),
+ void *owner, struct fence **fence)
+{
+ INIT_LIST_HEAD(&job->node);
+ kref_init(&job->refcount);
+ job->sched = sched;
+ job->s_entity = entity;
+ job->s_fence = amd_sched_fence_create(entity, owner);
+ if (!job->s_fence)
+ return -ENOMEM;
+
+ job->s_fence->s_job = job;
+ job->timeout_callback = timeout_cb;
+ job->free_callback = free_cb;
+
+ if (fence)
+ *fence = &job->s_fence->base;
+ return 0;
+}
+
/**
* Return true if we can push more jobs to the hw.
*/
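
amd_sched_job_init() now centralizes the common job setup (list node, refcount, scheduler fence, callbacks). Based only on the signature added above, here is a hedged caller-side sketch, not from the patch; demo_job_timedout(), demo_job_free() and demo_submit() are hypothetical stand-ins for a driver's real callbacks and submission path.

/*
 * Hypothetical caller sketch, not from the patch: shows the argument order
 * of the new amd_sched_job_init(). The demo_* names are placeholders.
 */
static void demo_job_timedout(struct work_struct *work)
{
	/* driver-specific timeout/recovery handling would go here */
}

static void demo_job_free(struct kref *refcount)
{
	/* final release of the embedding driver job would go here */
}

static int demo_submit(struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       struct amd_sched_job *job, void *owner)
{
	struct fence *fence;
	int r;

	r = amd_sched_job_init(job, sched, entity,
			       demo_job_timedout, demo_job_free,
			       owner, &fence);
	if (r)
		return r;	/* -ENOMEM when the scheduler fence allocation fails */

	amd_sched_entity_push_job(job);
	/* fence now points at job->s_fence->base for the caller to track */
	return 0;
}
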
@@ -383,47 +453,26 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
unsigned long flags;
atomic_dec(&sched->hw_rq_count);
+
+ /* remove job from ring_mirror_list */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_del_init(&s_fence->s_job->node);
+ sched->ops->finish_job(s_fence->s_job);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
amd_sched_fence_signal(s_fence);
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- cancel_delayed_work(&s_fence->dwork);
- spin_lock_irqsave(&sched->fence_list_lock, flags);
- list_del_init(&s_fence->list);
- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
- }
+
trace_amd_sched_process_job(s_fence);
fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}
-static void amd_sched_fence_work_func(struct work_struct *work)
-{
- struct amd_sched_fence *s_fence =
- container_of(work, struct amd_sched_fence, dwork.work);
- struct amd_gpu_scheduler *sched = s_fence->sched;
- struct amd_sched_fence *entity, *tmp;
- unsigned long flags;
-
- DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
-
- /* Clean all pending fences */
- spin_lock_irqsave(&sched->fence_list_lock, flags);
- list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
- DRM_ERROR(" fence no %d\n", entity->base.seqno);
- cancel_delayed_work(&entity->dwork);
- list_del_init(&entity->list);
- fence_put(&entity->base);
- }
- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-}
-
static int amd_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
int r, count;
- spin_lock_init(&sched->fence_list_lock);
- INIT_LIST_HEAD(&sched->fence_list);
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
@@ -431,7 +480,6 @@ static int amd_sched_main(void *param)
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;
- unsigned long flags;
wait_event_interruptible(sched->wake_up_worker,
(entity = amd_sched_select_entity(sched)) ||
@@ -446,15 +494,8 @@ static int amd_sched_main(void *param)
s_fence = sched_job->s_fence;
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
- schedule_delayed_work(&s_fence->dwork, sched->timeout);
- spin_lock_irqsave(&sched->fence_list_lock, flags);
- list_add_tail(&s_fence->list, &sched->fence_list);
- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
- }
-
atomic_inc(&sched->hw_rq_count);
+ amd_sched_job_pre_schedule(sched, sched_job);
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
@@ -489,7 +530,7 @@ static int amd_sched_main(void *param)
* Return 0 on success, otherwise error code.
*/
int amd_sched_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_backend_ops *ops,
+ const struct amd_sched_backend_ops *ops,
unsigned hw_submission, long timeout, const char *name)
{
int i;
@@ -502,6 +543,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
+ INIT_LIST_HEAD(&sched->ring_mirror_list);
+ spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
sched_fence_slab = kmem_cache_create(
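
The final hunks initialize ring_mirror_list and job_list_lock in amd_sched_init() and constify the backend ops pointer. As a closing illustration, here is a hedged setup sketch against the new const signature, not part of the patch; demo_ops, the queue depth and the timeout value are purely illustrative, and the driver would supply the real callbacks such as .run_job and .finish_job.

/*
 * Hypothetical setup sketch, not from the patch: calls the constified
 * amd_sched_init(). demo_ops and the parameter values are placeholders.
 */
static const struct amd_sched_backend_ops demo_ops = {
	/* .run_job = ..., .finish_job = ..., and the other driver callbacks */
};

static int demo_sched_setup(struct amd_gpu_scheduler *sched)
{
	/* 16 in-flight jobs and a 5 second TDR timeout, both illustrative */
	return amd_sched_init(sched, &demo_ops, 16,
			      msecs_to_jiffies(5000), "demo_ring");
}
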