path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
commit     8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree       e9891aa6c295060d065adffd610c4f49ecf884f3 /drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
parent     a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  213
1 file changed, 163 insertions(+), 50 deletions(-)
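
For orientation: the diff below reworks context management around two structures defined in amdgpu.h, which is not shown on this page. A minimal sketch of those definitions, assuming this kernel version; the AMDGPU_CTX_MAX_CS_PENDING value and exact field layout are assumptions for reference, not part of this commit:

	/*
	 * Sketch of the context structures the diff below relies on.
	 * They live in amdgpu.h, not in this file; the slot count and
	 * field layout here are assumptions based on this kernel version.
	 */
	#define AMDGPU_CTX_MAX_CS_PENDING	16	/* assumed window size */

	struct amdgpu_ctx_ring {
		uint64_t		sequence;	/* next sequence number to issue */
		struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
		struct amd_sched_entity	entity;		/* GPU scheduler entity */
	};

	struct amdgpu_ctx {
		struct kref		refcount;
		struct amdgpu_device	*adev;
		unsigned		reset_counter;
		spinlock_t		ring_lock;	/* protects sequence and fences[] */
		struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
	};
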
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6c66ac8a1..e0b80ccdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,54 +25,107 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr;
+	unsigned i, j;
+	int r;
 
-	ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	mgr = &ctx->fpriv->ctx_mgr;
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->adev = adev;
+	kref_init(&ctx->refcount);
+	spin_lock_init(&ctx->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		ctx->rings[i].sequence = 1;
 
-	idr_remove(&mgr->ctx_handles, ctx->id);
-	kfree(ctx);
+	if (amdgpu_enable_scheduler) {
+		/* create context entity for each ring */
+		for (i = 0; i < adev->num_rings; i++) {
+			struct amd_sched_rq *rq;
+			if (kernel)
+				rq = &adev->rings[i]->sched.kernel_rq;
+			else
+				rq = &adev->rings[i]->sched.sched_rq;
+			r = amd_sched_entity_init(&adev->rings[i]->sched,
+						  &ctx->rings[i].entity,
+						  rq, amdgpu_sched_jobs);
+			if (r)
+				break;
+		}
+
+		if (i < adev->num_rings) {
+			for (j = 0; j < i; j++)
+				amd_sched_entity_fini(&adev->rings[j]->sched,
+						      &ctx->rings[j].entity);
+			kfree(ctx);
+			return r;
+		}
+	}
+	return 0;
 }
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_sched_entity_fini(&adev->rings[i]->sched,
+					      &ctx->rings[i].entity);
+	}
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+			    struct amdgpu_fpriv *fpriv,
+			    uint32_t *id)
 {
-	int r;
-	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
+	int r;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
 	mutex_lock(&mgr->lock);
-	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
 	if (r < 0) {
 		mutex_unlock(&mgr->lock);
 		kfree(ctx);
 		return r;
 	}
 	*id = (uint32_t)r;
-
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->id = *id;
-	ctx->fpriv = fpriv;
-	kref_init(&ctx->refcount);
+	r = amdgpu_ctx_init(adev, false, ctx);
 	mutex_unlock(&mgr->lock);
 
-	return 0;
+	return r;
 }
 
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+
+	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+	amdgpu_ctx_fini(ctx);
+
+	kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (ctx) {
+		idr_remove(&mgr->ctx_handles, id);
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
 		mutex_unlock(&mgr->lock);
 		return 0;
@@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 			    union drm_amdgpu_ctx_out *out)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
 	unsigned reset_counter;
 
+	if (!fpriv)
+		return -EINVAL;
+
+	mgr = &fpriv->ctx_mgr;
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (!ctx) {
@@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	}
 
 	/* TODO: these two are always zero */
-	out->state.flags = ctx->state.flags;
-	out->state.hangs = ctx->state.hangs;
+	out->state.flags = 0x0;
+	out->state.hangs = 0x0;
 
 	/* determine if a GPU reset has occured since the last call */
 	reset_counter = atomic_read(&adev->gpu_reset_counter);
@@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	return 0;
 }
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
-{
-	struct idr *idp;
-	struct amdgpu_ctx *ctx;
-	uint32_t id;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp,ctx,id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
-			DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
-	}
-
-	mutex_destroy(&mgr->lock);
-}
-
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
 	uint32_t id;
-	uint32_t flags;
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
@@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 	r = 0;
 	id = args->in.ctx_id;
-	flags = args->in.flags;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
-		r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+		r = amdgpu_ctx_alloc(adev, fpriv, &id);
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
-		r = amdgpu_ctx_free(adev, fpriv, id);
+		r = amdgpu_ctx_free(fpriv, id);
 		break;
 	case AMDGPU_CTX_OP_QUERY_STATE:
 		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
 		break;
@@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
+
+	if (!fpriv)
+		return NULL;
+
+	mgr = &fpriv->ctx_mgr;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,86 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_mgr *mgr;
-
 	if (ctx == NULL)
 		return -EINVAL;
 
-	fpriv = ctx->fpriv;
-	mgr = &fpriv->ctx_mgr;
-	mutex_lock(&mgr->lock);
 	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-	mutex_unlock(&mgr->lock);
-
 	return 0;
 }
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence)
+{
+	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	uint64_t seq = cring->sequence;
+	unsigned idx = 0;
+	struct fence *other = NULL;
+
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
+	if (other) {
+		signed long r;
+		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0)
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+	}
+
+	fence_get(fence);
+
+	spin_lock(&ctx->ring_lock);
+	cring->fences[idx] = fence;
+	cring->sequence++;
+	spin_unlock(&ctx->ring_lock);
+
+	fence_put(other);
+
+	return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq)
+{
+	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	struct fence *fence;
+
+	spin_lock(&ctx->ring_lock);
+
+	if (seq >= cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return NULL;
+	}
+
+	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	spin_unlock(&ctx->ring_lock);
+
+	return fence;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+{
+	mutex_init(&mgr->lock);
+	idr_init(&mgr->ctx_handles);
+}
+
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+			DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+
+	idr_destroy(&mgr->ctx_handles);
+	mutex_destroy(&mgr->lock);
+}