Diffstat (limited to 'drivers/misc/cxl/context.c')
-rw-r--r--  drivers/misc/cxl/context.c  46
1 files changed, 33 insertions, 13 deletions
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index e4dc8cdf6..128714862 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
  */
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 {
+	u64 start = vma->vm_pgoff << PAGE_SHIFT;
 	u64 len = vma->vm_end - vma->vm_start;
-	len = min(len, ctx->psn_size);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		if (start + len > ctx->afu->adapter->ps_size)
+			return -EINVAL;
+	} else {
+		if (start + len > ctx->psn_size)
+			return -EINVAL;
+	}
 
 	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
 		/* make sure there is a valid per process space for this AFU */
@@ -174,7 +182,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
  * return until all outstanding interrupts for this context have completed. The
  * hardware should no longer access *ctx after this has returned.
  */
-static void __detach_context(struct cxl_context *ctx)
+int __detach_context(struct cxl_context *ctx)
 {
 	enum cxl_context_status status;
 
@@ -183,12 +191,13 @@ static void __detach_context(struct cxl_context *ctx)
 	ctx->status = CLOSED;
 	mutex_unlock(&ctx->status_mutex);
 	if (status != STARTED)
-		return;
+		return -EBUSY;
 
 	WARN_ON(cxl_detach_process(ctx));
-	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
-	wake_up_all(&ctx->wq);
+	put_pid(ctx->pid);
+	cxl_ctx_put();
+	return 0;
 }
 
 /*
@@ -199,7 +208,14 @@ static void __detach_context(struct cxl_context *ctx)
  */
 void cxl_context_detach(struct cxl_context *ctx)
 {
-	__detach_context(ctx);
+	int rc;
+
+	rc = __detach_context(ctx);
+	if (rc)
+		return;
+
+	afu_release_irqs(ctx, ctx);
+	wake_up_all(&ctx->wq);
 }
 
 /*
@@ -216,7 +232,7 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 		 * Anything done in here needs to be setup before the IDR is
 		 * created and torn down after the IDR removed
 		 */
-		__detach_context(ctx);
+		cxl_context_detach(ctx);
 
 		/*
 		 * We are force detaching - remove any active PSA mappings so
@@ -232,16 +248,20 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 	mutex_unlock(&afu->contexts_lock);
 }
 
-void cxl_context_free(struct cxl_context *ctx)
+static void reclaim_ctx(struct rcu_head *rcu)
 {
-	mutex_lock(&ctx->afu->contexts_lock);
-	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
-	mutex_unlock(&ctx->afu->contexts_lock);
-	synchronize_rcu();
+	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
 	free_page((u64)ctx->sstp);
 	ctx->sstp = NULL;
 
-	put_pid(ctx->pid);
 	kfree(ctx);
 }
+
+void cxl_context_free(struct cxl_context *ctx)
+{
+	mutex_lock(&ctx->afu->contexts_lock);
+	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
+	mutex_unlock(&ctx->afu->contexts_lock);
+	call_rcu(&ctx->rcu, reclaim_ctx);
+}
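
Note on the detach split: the patch turns __detach_context() into a helper that reports (via -EBUSY) whether a started context was actually torn down, and makes cxl_context_detach() release IRQs and wake waiters only when it was. The following standalone sketch illustrates just that calling pattern in plain userspace C; the toy_* names are hypothetical stand-ins and are not part of the cxl driver.

/*
 * Standalone sketch of the detach split shown in the hunks above.
 * Build with:  cc -Wall -o detach_sketch detach_sketch.c
 */
#include <errno.h>
#include <stdio.h>

enum toy_status { OPENED, STARTED, CLOSED };

struct toy_context {
	enum toy_status status;
	int irqs_held;
};

/* Plays the role of __detach_context(): always marks the context CLOSED,
 * but reports -EBUSY when it was never started so the caller knows there
 * is nothing further to tear down. */
static int toy_detach_context(struct toy_context *ctx)
{
	enum toy_status old = ctx->status;

	ctx->status = CLOSED;
	if (old != STARTED)
		return -EBUSY;
	return 0;
}

/* Plays the role of cxl_context_detach(): only releases per-context
 * resources and wakes waiters when a detach really happened. */
static void toy_context_detach(struct toy_context *ctx)
{
	if (toy_detach_context(ctx))
		return;

	ctx->irqs_held = 0;            /* stand-in for afu_release_irqs() */
	printf("waiters woken\n");     /* stand-in for wake_up_all()      */
}

int main(void)
{
	struct toy_context running = { .status = STARTED, .irqs_held = 1 };
	struct toy_context idle    = { .status = OPENED,  .irqs_held = 0 };

	toy_context_detach(&running);  /* releases IRQs, wakes waiters */
	toy_context_detach(&idle);     /* marks CLOSED, nothing else   */

	printf("running: status=%d irqs=%d\n", running.status, running.irqs_held);
	printf("idle:    status=%d irqs=%d\n", idle.status, idle.irqs_held);
	return 0;
}

The other half of the patch, the change to cxl_context_free(), is not modelled here because call_rcu() and struct rcu_head only make sense with the kernel's RCU machinery; its effect in the diff is that the context is removed from the IDR immediately but only kfree()'d in reclaim_ctx() after an RCU grace period, instead of blocking the caller in synchronize_rcu().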