Diffstat (limited to 'drivers/misc/cxl/context.c')
-rw-r--r--  drivers/misc/cxl/context.c | 50 +++++++++++++++++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 15 deletions(-)
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index d1b55fe62817..1287148629c0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
 		area = ctx->afu->psn_phys;
-		if (offset > ctx->afu->adapter->ps_size)
+		if (offset >= ctx->afu->adapter->ps_size)
 			return VM_FAULT_SIGBUS;
 	} else {
 		area = ctx->psn_phys;
-		if (offset > ctx->psn_size)
+		if (offset >= ctx->psn_size)
 			return VM_FAULT_SIGBUS;
 	}
 
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
  */
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 {
+	u64 start = vma->vm_pgoff << PAGE_SHIFT;
 	u64 len = vma->vm_end - vma->vm_start;
-	len = min(len, ctx->psn_size);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		if (start + len > ctx->afu->adapter->ps_size)
+			return -EINVAL;
+	} else {
+		if (start + len > ctx->psn_size)
+			return -EINVAL;
+	}
 
 	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
 		/* make sure there is a valid per process space for this AFU */
@@ -174,7 +182,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
  * return until all outstanding interrupts for this context have completed. The
  * hardware should no longer access *ctx after this has returned.
  */
-static void __detach_context(struct cxl_context *ctx)
+int __detach_context(struct cxl_context *ctx)
 {
 	enum cxl_context_status status;
 
@@ -183,12 +191,13 @@ static void __detach_context(struct cxl_context *ctx)
 	ctx->status = CLOSED;
 	mutex_unlock(&ctx->status_mutex);
 	if (status != STARTED)
-		return;
+		return -EBUSY;
 
 	WARN_ON(cxl_detach_process(ctx));
-	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
-	wake_up_all(&ctx->wq);
+	put_pid(ctx->pid);
+	cxl_ctx_put();
+	return 0;
 }
 
 /*
@@ -199,7 +208,14 @@ static void __detach_context(struct cxl_context *ctx)
  */
 void cxl_context_detach(struct cxl_context *ctx)
 {
-	__detach_context(ctx);
+	int rc;
+
+	rc = __detach_context(ctx);
+	if (rc)
+		return;
+
+	afu_release_irqs(ctx, ctx);
+	wake_up_all(&ctx->wq);
 }
 
 /*
@@ -216,7 +232,7 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 		 * Anything done in here needs to be setup before the IDR is
 		 * created and torn down after the IDR removed
 		 */
-		__detach_context(ctx);
+		cxl_context_detach(ctx);
 
 		/*
 		 * We are force detaching - remove any active PSA mappings so
@@ -232,16 +248,20 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 	mutex_unlock(&afu->contexts_lock);
 }
 
-void cxl_context_free(struct cxl_context *ctx)
+static void reclaim_ctx(struct rcu_head *rcu)
 {
-	mutex_lock(&ctx->afu->contexts_lock);
-	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
-	mutex_unlock(&ctx->afu->contexts_lock);
-	synchronize_rcu();
+	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
 	free_page((u64)ctx->sstp);
 	ctx->sstp = NULL;
 
-	put_pid(ctx->pid);
 	kfree(ctx);
 }
+
+void cxl_context_free(struct cxl_context *ctx)
+{
+	mutex_lock(&ctx->afu->contexts_lock);
+	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
+	mutex_unlock(&ctx->afu->contexts_lock);
+	call_rcu(&ctx->rcu, reclaim_ctx);
+}