author    | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-12-15 14:52:16 -0300
commit    | 8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree      | e9891aa6c295060d065adffd610c4f49ecf884f3 /drivers/gpu/drm/amd/scheduler
parent    | a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler')
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h |  41
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.c   | 425
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/gpu_scheduler.h   | 133
-rw-r--r-- | drivers/gpu/drm/amd/scheduler/sched_fence.c     |  81
4 files changed, 680 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 000000000..144f50acc
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_sched
+#define TRACE_INCLUDE_FILE gpu_sched_trace
+
+TRACE_EVENT(amd_sched_job,
+	    TP_PROTO(struct amd_sched_job *sched_job),
+	    TP_ARGS(sched_job),
+	    TP_STRUCT__entry(
+			     __field(struct amd_sched_entity *, entity)
+			     __field(const char *, name)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = sched_job->s_entity;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = kfifo_len(
+				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->name, __entry->job_count,
+		      __entry->hw_job_count)
+);
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
new file mode 100644
index 000000000..3697eeeec
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+#define CREATE_TRACE_POINTS
+#include "gpu_sched_trace.h"
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+
+/* Initialize a given run queue struct */
+static void amd_sched_rq_init(struct amd_sched_rq *rq)
+{
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->entities);
+	rq->current_entity = NULL;
+}
+
+static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+				    struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	spin_unlock(&rq->lock);
+}
+
+static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+				       struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * Select the next job from a specified run queue with round-robin policy.
+ * Return NULL if nothing is available.
+ */
+static struct amd_sched_job *
+amd_sched_rq_select_job(struct amd_sched_rq *rq)
+{
+	struct amd_sched_entity *entity;
+	struct amd_sched_job *sched_job;
+
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			sched_job = amd_sched_entity_pop_job(entity);
+			if (sched_job) {
+				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
+				return sched_job;
+			}
+		}
+	}
+
+	list_for_each_entry(entity, &rq->entities, list) {
+
+		sched_job = amd_sched_entity_pop_job(entity);
+		if (sched_job) {
+			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
+			return sched_job;
+		}
+
+		if (entity == rq->current_entity)
+			break;
+	}
+
+	spin_unlock(&rq->lock);
+
+	return NULL;
+}
+
+/**
+ * Init a context entity used by the scheduler when submitting to a HW ring.
+ *
+ * @sched	The pointer to the scheduler
+ * @entity	The pointer to a valid amd_sched_entity
+ * @rq		The run queue this entity belongs to
+ * @jobs	The max number of jobs in the job queue
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs)
+{
+	int r;
+
+	if (!(sched && entity && rq))
+		return -EINVAL;
+
+	memset(entity, 0, sizeof(struct amd_sched_entity));
+	INIT_LIST_HEAD(&entity->list);
+	entity->rq = rq;
+	entity->sched = sched;
+
+	spin_lock_init(&entity->queue_lock);
+	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
+	if (r)
+		return r;
+
+	atomic_set(&entity->fence_seq, 0);
+	entity->fence_context = fence_context_alloc(1);
+
+	/* Add the entity to the run queue */
+	amd_sched_rq_add_entity(rq, entity);
+
+	return 0;
+}
+
+/**
+ * Query if entity is initialized
+ *
+ * @sched	Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity is initialized, false otherwise.
+ */
+static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
+					    struct amd_sched_entity *entity)
+{
+	return entity->sched == sched &&
+		entity->rq != NULL;
+}
+
+/**
+ * Check if entity is idle
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity doesn't have any unscheduled jobs.
+ */
+static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+{
+	rmb();
+	if (kfifo_is_empty(&entity->job_queue))
+		return true;
+
+	return false;
+}
+
+/**
+ * Destroy a context entity
+ *
+ * @sched	Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Cleanup and free the allocated resources.
+ */
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity)
+{
+	struct amd_sched_rq *rq = entity->rq;
+
+	if (!amd_sched_entity_is_initialized(sched, entity))
+		return;
+
+	/*
+	 * The client will not queue more IBs during this fini; consume the
+	 * existing queued IBs.
+	 */
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+	amd_sched_rq_remove_entity(rq, entity);
+	kfifo_free(&entity->job_queue);
+}
+
+static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_entity *entity =
+		container_of(cb, struct amd_sched_entity, cb);
+	entity->dependency = NULL;
+	fence_put(f);
+	amd_sched_wakeup(entity->sched);
+}
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct amd_sched_job *sched_job;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return NULL;
+
+	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
+		return NULL;
+
+	while ((entity->dependency = sched->ops->dependency(sched_job))) {
+
+		if (fence_add_callback(entity->dependency, &entity->cb,
+				       amd_sched_entity_wakeup))
+			fence_put(entity->dependency);
+		else
+			return NULL;
+	}
+
+	return sched_job;
+}
+
+/**
+ * Helper to submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Returns true if we could submit the job.
+ */
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
+{
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	bool added, first = false;
+
+	spin_lock(&entity->queue_lock);
+	added = kfifo_in(&entity->job_queue, &sched_job,
+			 sizeof(sched_job)) == sizeof(sched_job);
+
+	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
+		first = true;
+
+	spin_unlock(&entity->queue_lock);
+
+	/* first job wakes up scheduler */
+	if (first)
+		amd_sched_wakeup(sched_job->sched);
+
+	return added;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+{
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	struct amd_sched_fence *fence = amd_sched_fence_create(
+		entity, sched_job->owner);
+
+	if (!fence)
+		return -ENOMEM;
+
+	fence_get(&fence->base);
+	sched_job->s_fence = fence;
+
+	wait_event(entity->sched->job_scheduled,
+		   amd_sched_entity_in(sched_job));
+	trace_amd_sched_job(sched_job);
+	return 0;
+}
+
+/**
+ * Return true if we can push more jobs to the hw.
+ */
+static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+{
+	return atomic_read(&sched->hw_rq_count) <
+		sched->hw_submission_limit;
+}
+
+/**
+ * Wake up the scheduler when it is ready
+ */
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+{
+	if (amd_sched_ready(sched))
+		wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * Select the next job to run
+ */
+static struct amd_sched_job *
+amd_sched_select_job(struct amd_gpu_scheduler *sched)
+{
+	struct amd_sched_job *sched_job;
+
+	if (!amd_sched_ready(sched))
+		return NULL;
+
+	/* Kernel run queue has higher priority than normal run queue */
+	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (sched_job == NULL)
+		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+
+	return sched_job;
+}
+
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_fence *s_fence =
+		container_of(cb, struct amd_sched_fence, cb);
+	struct amd_gpu_scheduler *sched = s_fence->sched;
+
+	atomic_dec(&sched->hw_rq_count);
+	amd_sched_fence_signal(s_fence);
+	fence_put(&s_fence->base);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+static int amd_sched_main(void *param)
+{
+	struct sched_param sparam = {.sched_priority = 1};
+	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+	int r, count;
+
+	sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+	while (!kthread_should_stop()) {
+		struct amd_sched_entity *entity;
+		struct amd_sched_fence *s_fence;
+		struct amd_sched_job *sched_job = NULL;
+		struct fence *fence;
+
+		wait_event_interruptible(sched->wake_up_worker,
+			kthread_should_stop() ||
+			(sched_job = amd_sched_select_job(sched)));
+
+		if (!sched_job)
+			continue;
+
+		entity = sched_job->s_entity;
+		s_fence = sched_job->s_fence;
+		atomic_inc(&sched->hw_rq_count);
+		fence = sched->ops->run_job(sched_job);
+		if (fence) {
+			r = fence_add_callback(fence, &s_fence->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &s_fence->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
+			fence_put(fence);
+		} else {
+			DRM_ERROR("Failed to run job!\n");
+			amd_sched_process_job(NULL, &s_fence->cb);
+		}
+
+		count = kfifo_out(&entity->job_queue, &sched_job,
+				  sizeof(sched_job));
+		WARN_ON(count != sizeof(sched_job));
+		wake_up(&sched->job_scheduled);
+	}
+	return 0;
+}
+
+/**
+ * Init a gpu scheduler instance
+ *
+ * @sched		The pointer to the scheduler
+ * @ops			The backend operations for this scheduler.
+ * @hw_submission	The max number of HW submissions in flight.
+ * @name		Name used for debugging
+ *
+ * Return 0 on success, otherwise error code.
+ */
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   uint32_t hw_submission, const char *name)
+{
+	sched->ops = ops;
+	sched->hw_submission_limit = hw_submission;
+	sched->name = name;
+	amd_sched_rq_init(&sched->sched_rq);
+	amd_sched_rq_init(&sched->kernel_rq);
+
+	init_waitqueue_head(&sched->wake_up_worker);
+	init_waitqueue_head(&sched->job_scheduled);
+	atomic_set(&sched->hw_rq_count, 0);
+
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		DRM_ERROR("Failed to create scheduler for %s.\n", name);
+		return PTR_ERR(sched->thread);
+	}
+
+	return 0;
+}
+
+/**
+ * Destroy a gpu scheduler
+ *
+ * @sched	The pointer to the scheduler
+ */
+void amd_sched_fini(struct amd_gpu_scheduler *sched)
+{
+	kthread_stop(sched->thread);
+}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 000000000..80b64dc22
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GPU_SCHEDULER_H_
+#define _GPU_SCHEDULER_H_
+
+#include <linux/kfifo.h>
+#include <linux/fence.h>
+
+struct amd_gpu_scheduler;
+struct amd_sched_rq;
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware ring, based on the
+ * scheduling policy.
+ */
+struct amd_sched_entity {
+	struct list_head		list;
+	struct amd_sched_rq		*rq;
+	struct amd_gpu_scheduler	*sched;
+
+	spinlock_t			queue_lock;
+	struct kfifo			job_queue;
+
+	atomic_t			fence_seq;
+	uint64_t			fence_context;
+
+	struct fence			*dependency;
+	struct fence_cb			cb;
+};
+
+/**
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct amd_sched_rq {
+	spinlock_t		lock;
+	struct list_head	entities;
+	struct amd_sched_entity	*current_entity;
+};
+
+struct amd_sched_fence {
+	struct fence			base;
+	struct fence_cb			cb;
+	struct amd_gpu_scheduler	*sched;
+	spinlock_t			lock;
+	void				*owner;
+};
+
+struct amd_sched_job {
+	struct amd_gpu_scheduler	*sched;
+	struct amd_sched_entity		*s_entity;
+	struct amd_sched_fence		*s_fence;
+	void				*owner;
+};
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
+/**
+ * Define the backend operations called by the scheduler;
+ * these functions should be implemented on the driver side.
+ */
+struct amd_sched_backend_ops {
+	struct fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+};
+
+/**
+ * One scheduler is implemented for each hardware ring.
+ */
+struct amd_gpu_scheduler {
+	struct amd_sched_backend_ops	*ops;
+	uint32_t			hw_submission_limit;
+	const char			*name;
+	struct amd_sched_rq		sched_rq;
+	struct amd_sched_rq		kernel_rq;
+	wait_queue_head_t		wake_up_worker;
+	wait_queue_head_t		job_scheduled;
+	atomic_t			hw_rq_count;
+	struct task_struct		*thread;
+};
+
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+		   struct amd_sched_backend_ops *ops,
+		   uint32_t hw_submission, const char *name);
+void amd_sched_fini(struct amd_gpu_scheduler *sched);
+
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs);
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity);
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 000000000..d80263809
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+{
+	struct amd_sched_fence *fence = NULL;
+	unsigned seq;
+
+	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+	fence->owner = owner;
+	fence->sched = s_entity->sched;
+	spin_lock_init(&fence->lock);
+
+	seq = atomic_inc_return(&s_entity->fence_seq);
+	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+		   s_entity->fence_context, seq);
+
+	return fence;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+	int ret = fence_signal(&fence->base);
+
+	if (!ret)
+		FENCE_TRACE(&fence->base, "signaled from irq context\n");
+	else
+		FENCE_TRACE(&fence->base, "was already signaled\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+	return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+	return (const char *)fence->sched->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+	return true;
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+	.get_driver_name = amd_sched_fence_get_driver_name,
+	.get_timeline_name = amd_sched_fence_get_timeline_name,
+	.enable_signaling = amd_sched_fence_enable_signaling,
+	.signaled = NULL,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
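
The `struct amd_sched_backend_ops` in gpu_scheduler.h is the whole driver-facing contract: the scheduler thread calls `dependency()` repeatedly until it returns NULL (installing `amd_sched_entity_wakeup` on each returned fence first), then hands the job to `run_job()` and attaches `amd_sched_process_job` to the fence it returns. A minimal sketch of a driver-side backend follows; `my_ring_hw_submit()` is a hypothetical submission path, not part of this patch:

```c
#include <linux/fence.h>
#include "gpu_scheduler.h"

/* Hypothetical HW submission path (assumed, not part of this patch);
 * returns a fence that signals when the ring has executed the job. */
extern struct fence *my_ring_hw_submit(struct amd_sched_job *sched_job);

/* Called by the scheduler until it returns NULL; every fence returned
 * here is waited on (via fence_add_callback) before run_job is called. */
static struct fence *my_ring_dependency(struct amd_sched_job *sched_job)
{
	return NULL;	/* this sketch has no cross-ring dependencies */
}

/* Pushes the job to the hardware; the scheduler attaches
 * amd_sched_process_job to the returned fence and then drops its
 * reference with fence_put. */
static struct fence *my_ring_run_job(struct amd_sched_job *sched_job)
{
	return my_ring_hw_submit(sched_job);
}

static struct amd_sched_backend_ops my_ring_sched_ops = {
	.dependency = my_ring_dependency,
	.run_job    = my_ring_run_job,
};
```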
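
Wiring this into `amd_sched_init()` and `amd_sched_entity_init()` then follows the pattern the code above expects: one scheduler (and one kernel thread) per hardware ring, one entity per context, queued on either `sched_rq` or the higher-priority `kernel_rq`. A hedged sketch reusing `my_ring_sched_ops` from the previous block; the submission depth and queue size are illustrative values, not taken from the patch:

```c
static struct amd_gpu_scheduler my_ring_sched;
static struct amd_sched_entity my_ctx_entity;

int my_ring_setup(void)
{
	int r;

	/* Allow at most 2 jobs in flight on the HW at once; this becomes
	 * hw_submission_limit, checked by amd_sched_ready(). */
	r = amd_sched_init(&my_ring_sched, &my_ring_sched_ops, 2, "my_ring");
	if (r)
		return r;

	/* Entity on the normal run queue; the kfifo holds up to 16 job
	 * pointers (kfifo_alloc rounds the byte size up to a power of two). */
	r = amd_sched_entity_init(&my_ring_sched, &my_ctx_entity,
				  &my_ring_sched.sched_rq, 16);
	if (r) {
		amd_sched_fini(&my_ring_sched);
		return r;
	}
	return 0;
}

/* Queue one job; amd_sched_entity_push_job() creates job->s_fence
 * itself and blocks until the entity's kfifo has room. */
int my_ring_queue_job(struct amd_sched_job *job, void *owner)
{
	job->sched = &my_ring_sched;
	job->s_entity = &my_ctx_entity;
	job->owner = owner;

	return amd_sched_entity_push_job(job);
}
```

Teardown mirrors setup: `amd_sched_entity_fini()` waits until the entity's queue drains before `amd_sched_fini()` stops the scheduler thread.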
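
Completion can be observed through the scheduler fence rather than the raw HW fence: `amd_sched_process_job()` signals `job->s_fence` once the fence returned by `run_job()` has signaled. A sketch of waiting on it, again using the hypothetical job from above:

```c
#include <linux/fence.h>
#include "gpu_scheduler.h"

/* Hypothetical sketch: block until a previously pushed job completes.
 * job->s_fence is valid once amd_sched_entity_push_job() returned 0. */
static int my_ring_wait_job(struct amd_sched_job *job, bool intr)
{
	signed long r;

	/* fence_wait() returns 0 on success, -ERESTARTSYS if an
	 * interruptible wait was broken by a signal. */
	r = fence_wait(&job->s_fence->base, intr);

	return (int)r;
}
```

Note the reference counting: `amd_sched_entity_push_job()` takes an extra reference via `fence_get()`, and `amd_sched_process_job()` puts only the scheduler's own reference, so whoever created the job should eventually drop its reference with `fence_put(&job->s_fence->base)`.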