path: root/drivers/gpu/drm/amd/scheduler
author	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-10-20 00:10:27 -0300
committer	André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-10-20 00:10:27 -0300
commit	d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch)
tree	7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /drivers/gpu/drm/amd/scheduler
parent	e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff)
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h	4
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.c	190
-rw-r--r--	drivers/gpu/drm/amd/scheduler/gpu_scheduler.h	60
-rw-r--r--	drivers/gpu/drm/amd/scheduler/sched_fence.c	81
4 files changed, 204 insertions, 131 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index c89dc7777..b961a1c6c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
TP_fast_assign(
__entry->entity = sched_job->s_entity;
__entry->sched_job = sched_job;
- __entry->fence = &sched_job->s_fence->base;
+ __entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
),
TP_fast_assign(
- __entry->fence = &fence->base;
+ __entry->fence = &fence->finished;
),
TP_printk("fence=%p signaled", __entry->fence)
);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index c16248cee..963a24d46 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,6 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(1);
+ entity->fence_context = fence_context_alloc(2);
return 0;
}
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
s_fence = to_amd_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
- /* Fence is from the same scheduler */
- if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
- /* Ignore it when it is already scheduled */
- fence_put(entity->dependency);
- return false;
- }
- /* Wait for fence to be scheduled */
- entity->cb.func = amd_sched_entity_clear_dep;
- list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
- return true;
+ /*
+ * Fence is from the same scheduler, only need to wait for
+ * it to be scheduled
+ */
+ fence = fence_get(&s_fence->scheduled);
+ fence_put(entity->dependency);
+ entity->dependency = fence;
+ if (!fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
+ return true;
+
+ /* Ignore it when it is already scheduled */
+ fence_put(fence);
+ return false;
}
if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -319,46 +324,114 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
return added;
}
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
- schedule_work(&job->work_free_job);
-}
-
/* job_finish is called after hw fence signaled, and
* the job had already been deleted from ring_mirror_list
*/
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *next;
+ struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
+ /* remove job from ring_mirror_list */
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- if (cancel_delayed_work(&s_job->work_tdr))
- amd_sched_job_put(s_job);
+ struct amd_sched_job *next;
+
+ spin_unlock(&sched->job_list_lock);
+ cancel_delayed_work_sync(&s_job->work_tdr);
+ spin_lock(&sched->job_list_lock);
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (next) {
- INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(next);
+ if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
- }
}
+ spin_unlock(&sched->job_list_lock);
+ sched->ops->free_job(s_job);
}
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ finish_cb);
+ schedule_work(&job->finish_work);
+}
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
+ spin_lock(&sched->job_list_lock);
+ list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
- {
- INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(s_job);
+ list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node) == s_job)
+ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+ spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+ struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+ work_tdr.work);
+
+ job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job;
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+ if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ fence_put(s_job->s_fence->parent);
+ s_job->s_fence->parent = NULL;
+ }
+ }
+ atomic_set(&sched->hw_rq_count, 0);
+ spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job, *tmp;
+ int r;
+
+ spin_lock(&sched->job_list_lock);
+ s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node);
+ if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+ list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ struct amd_sched_fence *s_fence = s_job->s_fence;
+ struct fence *fence;
+
+ spin_unlock(&sched->job_list_lock);
+ fence = sched->ops->run_job(s_job);
+ atomic_inc(&sched->hw_rq_count);
+ if (fence) {
+ s_fence->parent = fence_get(fence);
+ r = fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
+ if (r == -ENOENT)
+ amd_sched_process_job(fence, &s_fence->cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
+ fence_put(fence);
+ } else {
+ DRM_ERROR("Failed to run job!\n");
+ amd_sched_process_job(NULL, &s_fence->cb);
+ }
+ spin_lock(&sched->job_list_lock);
}
+ spin_unlock(&sched->job_list_lock);
}
/**
@@ -372,36 +445,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
- sched_job->use_sched = 1;
- fence_add_callback(&sched_job->s_fence->base,
- &sched_job->cb_free_job, amd_sched_free_job);
trace_amd_sched_job(sched_job);
+ fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
/* init a sched_job with basic field */
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref *refcount),
- void *owner, struct fence **fence)
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner)
{
- INIT_LIST_HEAD(&job->node);
- kref_init(&job->refcount);
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->s_fence->s_job = job;
- job->timeout_callback = timeout_cb;
- job->free_callback = free_cb;
+ INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_LIST_HEAD(&job->node);
+ INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
- if (fence)
- *fence = &job->s_fence->base;
return 0;
}
@@ -450,23 +516,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;
- unsigned long flags;
atomic_dec(&sched->hw_rq_count);
-
- /* remove job from ring_mirror_list */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_del_init(&s_fence->s_job->node);
- sched->ops->finish_job(s_fence->s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- amd_sched_fence_signal(s_fence);
+ amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->base);
+ fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+ if (kthread_should_park()) {
+ kthread_parkme();
+ return true;
+ }
+
+ return false;
+}
+
static int amd_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +544,15 @@ static int amd_sched_main(void *param)
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_entity *entity;
+ struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;
wait_event_interruptible(sched->wake_up_worker,
- (entity = amd_sched_select_entity(sched)) ||
- kthread_should_stop());
+ (!amd_sched_blocked(sched) &&
+ (entity = amd_sched_select_entity(sched))) ||
+ kthread_should_stop());
if (!entity)
continue;
@@ -495,16 +564,19 @@ static int amd_sched_main(void *param)
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_pre_schedule(sched, sched_job);
+ amd_sched_job_begin(sched_job);
+
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
+ s_fence->parent = fence_get(fence);
r = fence_add_callback(fence, &s_fence->cb,
amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 070095a94..7cbbbfb50 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,8 +27,6 @@
#include <linux/kfifo.h>
#include <linux/fence.h>
-#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
-
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -68,36 +66,34 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence base;
+ struct fence scheduled;
+ struct fence finished;
struct fence_cb cb;
- struct list_head scheduled_cb;
+ struct fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
- struct amd_sched_job *s_job;
};
struct amd_sched_job {
- struct kref refcount;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- bool use_sched; /* true if the job goes to scheduler */
- struct fence_cb cb_free_job;
- struct work_struct work_free_job;
- struct list_head node;
- struct delayed_work work_tdr;
- void (*timeout_callback) (struct work_struct *work);
- void (*free_callback)(struct kref *refcount);
+ struct fence_cb finish_cb;
+ struct work_struct finish_work;
+ struct list_head node;
+ struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops;
+extern const struct fence_ops amd_sched_fence_ops_scheduled;
+extern const struct fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
- struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+ if (f->ops == &amd_sched_fence_ops_scheduled)
+ return container_of(f, struct amd_sched_fence, scheduled);
- if (__f->base.ops == &amd_sched_fence_ops)
- return __f;
+ if (f->ops == &amd_sched_fence_ops_finished)
+ return container_of(f, struct amd_sched_fence, finished);
return NULL;
}
@@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
struct amd_sched_backend_ops {
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*begin_job)(struct amd_sched_job *sched_job);
- void (*finish_job)(struct amd_sched_job *sched_job);
+ void (*timedout_job)(struct amd_sched_job *sched_job);
+ void (*free_job)(struct amd_sched_job *sched_job);
};
enum amd_sched_priority {
@@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_signal(struct amd_sched_fence *fence);
+void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref* refcount),
- void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
- if (job)
- kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
- if (job)
- kref_put(&job->refcount, job->free_callback);
-}
-
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner);
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
#endif
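
With the reworked header, a backend implements four callbacks instead of begin_job/finish_job, and amd_sched_job_init() no longer takes timeout/free callbacks or returns a fence. The sketch below is illustrative only: every my_* name is made up, error handling and the actual hardware submission are elided, and the ops table would be handed to the scheduler when it is created (amd_sched_init(), not shown in this diff).

#include <linux/slab.h>
#include "gpu_scheduler.h"

struct my_job {
	struct amd_sched_job base;	/* embedded scheduler job */
	struct fence *hw_fence;		/* fence of the real hardware submission */
};

static struct fence *my_dependency(struct amd_sched_job *sched_job)
{
	return NULL;			/* no extra dependencies in this sketch */
}

static struct fence *my_run_job(struct amd_sched_job *sched_job)
{
	struct my_job *job = container_of(sched_job, struct my_job, base);

	/* submit to the hardware here; the scheduler stores the returned
	 * fence as s_fence->parent and signals "finished" from it */
	return fence_get(job->hw_fence);
}

static void my_timedout_job(struct amd_sched_job *sched_job)
{
	/* invoked from the work_tdr delayed work when the timeout expires */
}

static void my_free_job(struct amd_sched_job *sched_job)
{
	/* called from amd_sched_job_finish() once the job is off the list */
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct amd_sched_backend_ops my_sched_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};

static int my_submit(struct amd_gpu_scheduler *sched,
		     struct amd_sched_entity *entity, void *owner)
{
	struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
	int r;

	if (!job)
		return -ENOMEM;

	r = amd_sched_job_init(&job->base, sched, entity, owner);
	if (r) {
		kfree(job);
		return r;
	}

	amd_sched_entity_push_job(&job->base);
	return 0;
}
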
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 2a732c490..6b63beaf7 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,7 +27,8 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+ void *owner)
{
struct amd_sched_fence *fence = NULL;
unsigned seq;
@@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
if (fence == NULL)
return NULL;
- INIT_LIST_HEAD(&fence->scheduled_cb);
fence->owner = owner;
- fence->sched = s_entity->sched;
+ fence->sched = entity->sched;
spin_lock_init(&fence->lock);
- seq = atomic_inc_return(&s_entity->fence_seq);
- fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
- s_entity->fence_context, seq);
+ seq = atomic_inc_return(&entity->fence_seq);
+ fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
-void amd_sched_fence_signal(struct amd_sched_fence *fence)
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->base);
+ int ret = fence_signal(&fence->scheduled);
+
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
-}
-
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job)
-{
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
- sched->ops->begin_job(s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ FENCE_TRACE(&fence->scheduled, "was already signaled\n");
}
-void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ int ret = fence_signal(&fence->finished);
- set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
- list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
- list_del_init(&cur->node);
- cur->func(&s_fence->base, cur);
- }
+ if (!ret)
+ FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ else
+ FENCE_TRACE(&fence->finished, "was already signaled\n");
}
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
{
struct fence *f = container_of(rcu, struct fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct fence *f)
{
- call_rcu(&f->rcu, amd_sched_fence_free);
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ call_rcu(&fence->finished.rcu, amd_sched_fence_free);
}
-const struct fence_ops amd_sched_fence_ops = {
+/**
+ * amd_sched_fence_release_scheduled - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */
+static void amd_sched_fence_release_finished(struct fence *f)
+{
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(&fence->scheduled);
+}
+
+const struct fence_ops amd_sched_fence_ops_scheduled = {
+ .get_driver_name = amd_sched_fence_get_driver_name,
+ .get_timeline_name = amd_sched_fence_get_timeline_name,
+ .enable_signaling = amd_sched_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = fence_default_wait,
+ .release = amd_sched_fence_release_scheduled,
+};
+
+const struct fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
- .release = amd_sched_fence_release,
+ .release = amd_sched_fence_release_finished,
};
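
The net effect of the sched_fence.c changes is that every job now carries a fence pair on adjacent fence contexts: "scheduled" signals once run_job() has been called, while "finished" (the replacement for the old "base" fence) signals once the hardware parent fence completes. A small illustrative helper, assuming only the declarations above (the my_* name is hypothetical), mirrors the shortcut amd_sched_entity_add_dependency_cb() now takes for dependencies coming from the same scheduler:

#include "gpu_scheduler.h"

static struct fence *my_optimize_dependency(struct fence *f,
					    struct amd_gpu_scheduler *sched)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence && s_fence->sched == sched) {
		/* Same scheduler: waiting until the dependency was merely
		 * scheduled is enough, since jobs handed to the same ring
		 * execute in submission order. */
		struct fence *scheduled = fence_get(&s_fence->scheduled);

		fence_put(f);
		return scheduled;
	}

	/* Foreign fence: keep the full dependency. */
	return f;
}
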