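This patch converts the problem state area (PSA) mmap from an eager mapping to a lazy fault handler. Instead of mapping the whole region up front with vm_iomap_memory(), cxl_context_iomap() now sets VM_IO | VM_PFNMAP and installs cxl_mmap_vmops; cxl_mmap_fault() inserts one PFN per fault and returns VM_FAULT_SIGBUS for out-of-range offsets or when the context is not STARTED. The unmap_mapping_range() call moves from __detach_context() to the force-detach loop in cxl_context_detach_all(), so that unbinding the driver while in use (e.g. via sysfs) revokes any live userspace PSA mappings; later accesses then fault into the handler and raise SIGBUS rather than reach the card.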
Diffstat (limited to 'drivers/misc/cxl/context.c')
 drivers/misc/cxl/context.c | 82 ++++++++++++++++++++++++++++++-----------
 1 file changed, 63 insertions(+), 19 deletions(-)
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b524371..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct cxl_context *ctx = vma->vm_file->private_data;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	u64 area, offset;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+
+	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+			__func__, ctx->pe, address, offset);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		area = ctx->afu->psn_phys;
+		if (offset > ctx->afu->adapter->ps_size)
+			return VM_FAULT_SIGBUS;
+	} else {
+		area = ctx->psn_phys;
+		if (offset > ctx->psn_size)
+			return VM_FAULT_SIGBUS;
+	}
+
+	mutex_lock(&ctx->status_mutex);
+
+	if (ctx->status != STARTED) {
+		mutex_unlock(&ctx->status_mutex);
+		pr_devel("%s: Context not started, failing problem state access\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+	mutex_unlock(&ctx->status_mutex);
+
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+	.fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	u64 len = vma->vm_end - vma->vm_start;
 	len = min(len, ctx->psn_size);
 
-	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-	}
+	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+		/* make sure there is a valid per process space for this AFU */
+		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+			pr_devel("AFU doesn't support mmio space\n");
+			return -EINVAL;
+		}
 
-	/* make sure there is a valid per process space for this AFU */
-	if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-		pr_devel("AFU doesn't support mmio space\n");
-		return -EINVAL;
-	}
+		/* Can't mmap until the AFU is enabled */
+		if (!ctx->afu->enabled)
+			return -EBUSY;
+	}
 
-	/* Can't mmap until the AFU is enabled */
-	if (!ctx->afu->enabled)
-		return -EBUSY;
-
 	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
 		 ctx->psn_phys, ctx->pe , ctx->master);
 
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	return vm_iomap_memory(vma, ctx->psn_phys, len);
+	vma->vm_ops = &cxl_mmap_vmops;
+	return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
 	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	wake_up_all(&ctx->wq);
-
-	/* Release Problem State Area mapping */
-	mutex_lock(&ctx->mapping_lock);
-	if (ctx->mapping)
-		unmap_mapping_range(ctx->mapping, 0, 0, 1);
-	mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 		 * created and torn down after the IDR removed
 		 */
 		__detach_context(ctx);
+
+		/*
+		 * We are force detaching - remove any active PSA mappings so
+		 * userspace cannot interfere with the card if it comes back.
+		 * Easiest way to exercise this is to unbind and rebind the
+		 * driver via sysfs while it is in use.
+		 */
+		mutex_lock(&ctx->mapping_lock);
+		if (ctx->mapping)
+			unmap_mapping_range(ctx->mapping, 0, 0, 1);
+		mutex_unlock(&ctx->mapping_lock);
 	}
 	mutex_unlock(&afu->contexts_lock);
 }
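For context, a minimal userspace sketch of how this path is exercised, assuming the cxl character device API from Documentation/powerpc/cxl.txt and <misc/cxl.h>; the device path, WED value and mapping length below are illustrative placeholders, not taken from this patch:

/*
 * Minimal sketch (not part of the patch): maps an AFU problem state
 * area through the cxl char device.  Assumes struct cxl_ioctl_start_work
 * and CXL_IOCTL_START_WORK from the cxl uapi; "/dev/cxl/afu0.0s", the
 * WED value and the 0x10000 length are placeholders.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <misc/cxl.h>

int main(void)
{
	struct cxl_ioctl_start_work work;
	void *psa;
	int fd;

	fd = open("/dev/cxl/afu0.0s", O_RDWR);	/* slave (per-process) context */
	if (fd < 0)
		return 1;

	memset(&work, 0, sizeof(work));
	work.work_element_descriptor = 0;	/* placeholder WED for the AFU */
	if (ioctl(fd, CXL_IOCTL_START_WORK, &work) == -1)
		return 1;	/* cxl_mmap_fault() refuses access until STARTED */

	/*
	 * With this patch the PSA pages are faulted in one at a time by
	 * cxl_mmap_fault() instead of being mapped up front at mmap time.
	 */
	psa = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (psa == MAP_FAILED)
		return 1;

	/* ... MMIO accesses to the problem state area through psa ... */

	munmap(psa, 0x10000);
	close(fd);
	return 0;
}

If the driver is unbound via sysfs while such a mapping is live, the unmap_mapping_range() call added in cxl_context_detach_all() tears down the PTEs; the next access traps into cxl_mmap_fault(), finds the context no longer STARTED, and delivers SIGBUS instead of touching the hardware.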