-rw-r--r--	arch/powerpc/include/asm/thread_info.h		13
-rw-r--r--	arch/powerpc/platforms/powernv/opal-wrappers.S	 1
-rw-r--r--	drivers/misc/cxl/context.c			82
-rw-r--r--	drivers/misc/cxl/file.c				14
4 files changed, 78 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690a..0be6c681cab1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
 #define THREAD_SIZE		(1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp)	clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
 #else
-#define CURRENT_THREAD_INFO(dest, sp)	rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
 static inline struct thread_info *current_thread_info(void)
 {
-	/* gcc4, at least, is smart enough to turn this into a single
-	 * rlwinm for ppc32 and clrrdi for ppc64 */
-	return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+	unsigned long val;
+
+	asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+	return (struct thread_info *)val;
 }
 
 #endif /* __ASSEMBLY__ */
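
[Note: the thread_info.h change above works because stringify_in_c() expands to raw tokens when the header is included from assembly, but to a quoted string literal when included from C, so one CURRENT_THREAD_INFO() definition serves both .S files and the inline asm above. The following is a minimal standalone sketch of that dual expansion (simplified macros, made-up THREAD_SHIFT value, ppc64-only asm), not the kernel's actual asm-compat.h definitions.]

/* Sketch only: simplified stand-in for the kernel's stringify_in_c(). */
#ifdef __ASSEMBLY__
#define stringify_in_c(...)	__VA_ARGS__			/* raw tokens for .S files */
#else
#define __stringify_in_c(...)	#__VA_ARGS__
#define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "	/* quoted string for C */
#endif

#define THREAD_SHIFT	14	/* hypothetical value for this sketch */
#define CURRENT_THREAD_INFO(dest, sp) \
	stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)

/*
 * In C, CURRENT_THREAD_INFO(%0, 1) now yields a string such as
 * "clrrdi %0, 1, 14 ", which inline asm consumes directly (ppc64 only).
 */
static inline void *demo_current_thread_info(void)
{
	unsigned long val;

	asm (CURRENT_THREAD_INFO(%0, 1) : "=r" (val));
	return (void *)val;
}
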
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b288..0509bca5e830 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
 	b	1f;						\
 END_FTR_SECTION(0, 1);						\
 	ld	r12,opal_tracepoint_refcount@toc(r2);		\
-	std	r12,32(r1);					\
 	cmpdi	r12,0;						\
 	bne-	LABEL;						\
 1:
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b524371..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct cxl_context *ctx = vma->vm_file->private_data;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	u64 area, offset;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+
+	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+			__func__, ctx->pe, address, offset);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		area = ctx->afu->psn_phys;
+		if (offset > ctx->afu->adapter->ps_size)
+			return VM_FAULT_SIGBUS;
+	} else {
+		area = ctx->psn_phys;
+		if (offset > ctx->psn_size)
+			return VM_FAULT_SIGBUS;
+	}
+
+	mutex_lock(&ctx->status_mutex);
+
+	if (ctx->status != STARTED) {
+		mutex_unlock(&ctx->status_mutex);
+		pr_devel("%s: Context not started, failing problem state access\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+	mutex_unlock(&ctx->status_mutex);
+
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+	.fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	u64 len = vma->vm_end - vma->vm_start;
 	len = min(len, ctx->psn_size);
 
-	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-	}
+	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+		/* make sure there is a valid per process space for this AFU */
+		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+			pr_devel("AFU doesn't support mmio space\n");
+			return -EINVAL;
+		}
 
-	/* make sure there is a valid per process space for this AFU */
-	if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-		pr_devel("AFU doesn't support mmio space\n");
-		return -EINVAL;
+		/* Can't mmap until the AFU is enabled */
+		if (!ctx->afu->enabled)
+			return -EBUSY;
 	}
 
-	/* Can't mmap until the AFU is enabled */
-	if (!ctx->afu->enabled)
-		return -EBUSY;
-
 	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
 		 ctx->psn_phys, ctx->pe , ctx->master);
 
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	return vm_iomap_memory(vma, ctx->psn_phys, len);
+	vma->vm_ops = &cxl_mmap_vmops;
+	return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
 	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	wake_up_all(&ctx->wq);
-
-	/* Release Problem State Area mapping */
-	mutex_lock(&ctx->mapping_lock);
-	if (ctx->mapping)
-		unmap_mapping_range(ctx->mapping, 0, 0, 1);
-	mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 		 * created and torn down after the IDR removed
 		 */
 		__detach_context(ctx);
+
+		/*
+		 * We are force detaching - remove any active PSA mappings so
+		 * userspace cannot interfere with the card if it comes back.
+		 * Easiest way to exercise this is to unbind and rebind the
+		 * driver via sysfs while it is in use.
+		 */
+		mutex_lock(&ctx->mapping_lock);
+		if (ctx->mapping)
+			unmap_mapping_range(ctx->mapping, 0, 0, 1);
+		mutex_unlock(&ctx->mapping_lock);
 	}
 	mutex_unlock(&afu->contexts_lock);
 }
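
[Note: taken together, the context.c hunks switch the mmio mapping from an eager vm_iomap_memory() at mmap() time to lazy, revocable mappings. mmap() only marks the VMA VM_IO | VM_PFNMAP and installs cxl_mmap_vmops, every access goes through cxl_mmap_fault() (which refuses service unless the context is STARTED), and a forced detach calls unmap_mapping_range() to rip out existing PTEs so stale userspace mappings fault again instead of poking the card. The following is a sketch of that pattern for a hypothetical device: foodev and all its fields are made up, while the fault handler and vm_insert_pfn()/unmap_mapping_range() signatures match this kernel generation.]

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

struct foodev {
	struct mutex	 lock;		/* protects 'live' */
	bool		 live;		/* device state allows access */
	phys_addr_t	 mmio_base;	/* physical MMIO window */
	resource_size_t	 mmio_size;
	struct address_space *mapping;	/* file mapping, saved at open() */
};

static int foodev_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct foodev *dev = vma->vm_file->private_data;
	unsigned long addr = (unsigned long)vmf->virtual_address;
	u64 offset = vmf->pgoff << PAGE_SHIFT;

	if (offset >= dev->mmio_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&dev->lock);
	if (!dev->live) {		/* device gone: fail the access */
		mutex_unlock(&dev->lock);
		return VM_FAULT_SIGBUS;
	}
	vm_insert_pfn(vma, addr, (dev->mmio_base + offset) >> PAGE_SHIFT);
	mutex_unlock(&dev->lock);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct foodev_vmops = {
	.fault = foodev_mmap_fault,
};

static int foodev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* No eager vm_iomap_memory(): map lazily via the fault handler. */
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &foodev_vmops;
	return 0;
}

/* On forced detach, tear down every PTE so old mappings re-fault (and
 * then SIGBUS, since 'live' is false) instead of touching the card. */
static void foodev_revoke_mappings(struct foodev *dev)
{
	mutex_lock(&dev->lock);
	dev->live = false;
	mutex_unlock(&dev->lock);
	unmap_mapping_range(dev->mapping, 0, 0, 1);
}
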
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10dbb37..b15d8113877c 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
 	pr_devel("%s: pe: %i\n", __func__, ctx->pe);
 
-	mutex_lock(&ctx->status_mutex);
-	if (ctx->status != OPENED) {
-		rc = -EIO;
-		goto out;
-	}
-
+	/* Do this outside the status_mutex to avoid a circular dependency with
+	 * the locking in cxl_mmap_fault() */
 	if (copy_from_user(&work, uwork,
 			   sizeof(struct cxl_ioctl_start_work))) {
 		rc = -EFAULT;
 		goto out;
 	}
 
+	mutex_lock(&ctx->status_mutex);
+	if (ctx->status != OPENED) {
+		rc = -EIO;
+		goto out;
+	}
+
 	/*
 	 * if any of the reserved fields are set or any of the unused
 	 * flags are set it's invalid
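
[Note: the file.c reordering matters because copy_from_user() can fault on the user buffer, and resolving that fault takes mmap_sem; if the buffer happens to live in the context's own problem-state mapping, cxl_mmap_fault() then wants status_mutex. Holding status_mutex across copy_from_user() therefore risks an ABBA inversion against the fault path. Below is a hedged sketch of the corrected ordering; foo_ctx/foo_start_work and the call chains in the comment are illustrative, not verbatim cxl code.]

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/*
 * Assumed call chains (simplified) showing the inversion being avoided:
 *
 *   old ioctl path:                        page-fault path:
 *     mutex_lock(&ctx->status_mutex);        down_read(&mm->mmap_sem);
 *     copy_from_user(...);                   cxl_mmap_fault();
 *       -> faults on a user page               mutex_lock(&ctx->status_mutex);
 *       -> down_read(&mm->mmap_sem);
 *
 * Copying first, with no mutex held, keeps status_mutex out of any region
 * where mmap_sem may be taken, so the fault handler can safely take
 * status_mutex while running under mmap_sem.
 */
struct foo_work { __u64 flags; };

struct foo_ctx {
	struct mutex	status_mutex;
	int		status;		/* e.g. OPENED / STARTED */
};

static long foo_start_work(struct foo_ctx *ctx, struct foo_work __user *uwork)
{
	struct foo_work work;
	long rc = 0;

	/* May fault and take mmap_sem: must not hold status_mutex here. */
	if (copy_from_user(&work, uwork, sizeof(work)))
		return -EFAULT;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != 1 /* OPENED */)
		rc = -EIO;
	mutex_unlock(&ctx->status_mutex);

	return rc;
}
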