author    Ian Munsie <imunsie@au1.ibm.com>  2014-10-08 04:55:02 -0400
committer Michael Ellerman <mpe@ellerman.id.au>  2014-10-08 05:15:57 -0400
commit    f204e0b8cedd7da1dfcfd05ed6b7692737e24029 (patch)
tree      35ca15049345cdd5dbed38229a6b3add05610658 /drivers/misc/cxl/irq.c
parent    10542ca0156f60571ef41799d44d40dd4cb0a473 (diff)
cxl: Driver code for powernv PCIe based cards for userspace access
This is the core of the cxl driver.

It adds support for using cxl cards in the powernv environment only (ie. POWER8 bare metal). It allows access to cxl accelerators by userspace using the /dev/cxl/afuM.N char devices.

The kernel driver has no knowledge of the function implemented by the accelerator. It provides services to userspace via the /dev/cxl/afuM.N devices. When a program opens this device and runs the start work IOCTL, the accelerator will have coherent access to that process's memory using the same virtual addresses. That process may mmap the device to access any MMIO space the accelerator provides. In addition, reads on the device allow interrupts to be received. These services are further documented in a later patch in Documentation/powerpc/cxl.txt.

Documentation of the cxl hardware architecture and userspace API is provided in subsequent patches.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
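For orientation, a minimal sketch of the userspace flow described above. This is not part of this patch: it assumes the uapi header, ioctl name and struct layout introduced later in this series, and the work element descriptor value is an AFU-specific placeholder.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <misc/cxl.h>   /* assumed: uapi header from a later patch in this series */

int main(void)
{
        /* One char device per AFU: /dev/cxl/afuM.N */
        int fd = open("/dev/cxl/afu0.0", O_RDWR);
        if (fd < 0)
                return 1;

        /* Attach this process's address space to the accelerator */
        struct cxl_ioctl_start_work work;
        memset(&work, 0, sizeof(work));
        work.work_element_descriptor = 0;   /* placeholder: AFU-specific WED */
        if (ioctl(fd, CXL_IOCTL_START_WORK, &work))
                return 1;

        /* Map the AFU's per-process MMIO space */
        void *mmio = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        /* A read blocks until the AFU delivers an interrupt or an error */
        struct cxl_event event;
        read(fd, &event, sizeof(event));

        if (mmio != MAP_FAILED)
                munmap(mmio, 4096);
        close(fd);
        return 0;
}
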
Diffstat (limited to 'drivers/misc/cxl/irq.c')
-rw-r--r--   drivers/misc/cxl/irq.c   402
1 file changed, 402 insertions, 0 deletions
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
new file mode 100644
index 000000000000..336020c8e1af
--- /dev/null
+++ b/drivers/misc/cxl/irq.c
@@ -0,0 +1,402 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl.h>

#include "cxl.h"

/* XXX: This is implementation specific */
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
{
        u64 fir1, fir2, fir_slice, serr, afu_debug;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
        fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
        serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
        afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
        dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
        dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
        dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

        dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(ctx->afu->adapter);

        return cxl_ack_irq(ctx, 0, errstat);
}

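/*
 * Slice error interrupt: log the per-slice error state, then write the
 * value back to PSL_SERR_An to clear the latched condition.
 */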
irqreturn_t cxl_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        u64 fir_slice, errstat, serr, afu_debug;

        WARN(1, "CXL SLICE ERROR interrupt %i\n", irq);

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
        errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
        dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
        dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
        dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
        dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return IRQ_HANDLED;
}

static irqreturn_t cxl_irq_err(int irq, void *data)
{
        struct cxl *adapter = data;
        u64 fir1, fir2, err_ivte;

        WARN(1, "CXL ERROR interrupt %i\n", irq);

        err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
        dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);

        dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(adapter);

        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

        dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);

        return IRQ_HANDLED;
}

static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
        ctx->dsisr = dsisr;
        ctx->dar = dar;
        schedule_work(&ctx->fault_work);
        return IRQ_HANDLED;
}

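/*
 * Per-context PSL interrupt handler: translation faults are deferred to
 * the fault work queue (we can't safely take a reference to the task's
 * mm from interrupt context); AFU errors are stashed on the context and
 * reported to userspace via read().
 */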
static irqreturn_t cxl_irq(int irq, void *data)
{
        struct cxl_context *ctx = data;
        struct cxl_irq_info irq_info;
        u64 dsisr, dar;
        int result;

        if ((result = cxl_get_irq(ctx, &irq_info))) {
                WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
                return IRQ_HANDLED;
        }

        dsisr = irq_info.dsisr;
        dar = irq_info.dar;

        pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

        if (dsisr & CXL_PSL_DSISR_An_DS) {
                /*
                 * We don't inherently need to sleep to handle this, but we do
                 * need to get a ref to the task's mm, which we can't do from
                 * irq context without the potential for a deadlock since it
                 * takes the task_lock. An alternate option would be to keep a
                 * reference to the task's mm the entire time it has cxl open,
                 * but to do that we need to solve the issue where we hold a
                 * ref to the mm, but the mm can hold a ref to the fd after an
                 * mmap preventing anything from being cleaned up.
                 */
                pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }

        if (dsisr & CXL_PSL_DSISR_An_M)
                pr_devel("CXL interrupt: PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_P)
                pr_devel("CXL interrupt: Storage protection violation\n");
        if (dsisr & CXL_PSL_DSISR_An_A)
                pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
        if (dsisr & CXL_PSL_DSISR_An_S)
                pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
        if (dsisr & CXL_PSL_DSISR_An_K)
                pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

        if (dsisr & CXL_PSL_DSISR_An_DM) {
                /*
                 * In some cases we might be able to handle the fault
                 * immediately if hash_page would succeed, but we still need
                 * the task's mm, which as above we can't get without a lock
                 */
                pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }
        if (dsisr & CXL_PSL_DSISR_An_ST)
                WARN(1, "CXL interrupt: Segment Table PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_UR)
                pr_devel("CXL interrupt: AURP PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_PE)
                return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
        if (dsisr & CXL_PSL_DSISR_An_AE) {
                pr_devel("CXL interrupt: AFU Error %llx\n", irq_info.afu_err);

                if (ctx->pending_afu_err) {
                        /*
                         * This shouldn't happen - the PSL treats these errors
                         * as fatal and will have reset the AFU, so there's not
                         * much point buffering multiple AFU errors.
                         * OTOH if we DO ever see a storm of these come in it's
                         * probably best that we log them somewhere:
                         */
                        dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
                                            "undelivered to pe %i: %llx\n",
                                            ctx->pe, irq_info.afu_err);
                } else {
                        spin_lock(&ctx->lock);
                        ctx->afu_err = irq_info.afu_err;
                        ctx->pending_afu_err = 1;
                        spin_unlock(&ctx->lock);

                        wake_up_all(&ctx->wq);
                }

                cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
        }
        if (dsisr & CXL_PSL_DSISR_An_OC)
                pr_devel("CXL interrupt: OS Context Warning\n");

        WARN(1, "Unhandled CXL PSL IRQ\n");
        return IRQ_HANDLED;
}

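/*
 * All contexts on an AFU share a single multiplexed PSL interrupt; use
 * the PE handle latched by the PSL to find the context it belongs to.
 */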
static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
        struct cxl_afu *afu = data;
        struct cxl_context *ctx;
        int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
        int ret;

        rcu_read_lock();
        ctx = idr_find(&afu->contexts_idr, ph);
        if (ctx) {
                ret = cxl_irq(irq, ctx);
                rcu_read_unlock();
                return ret;
        }
        rcu_read_unlock();

        WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
        return IRQ_HANDLED;
}

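/*
 * AFU interrupt: translate the hwirq back to the per-context AFU
 * interrupt number (ranges 1..CXL_IRQ_RANGES-1; range 0 is the
 * multiplexed PSL interrupt), then flag it for userspace to collect.
 */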
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
        struct cxl_context *ctx = data;
        irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
        int irq_off, afu_irq = 1;
        __u16 range;
        int r;

        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                irq_off = hwirq - ctx->irqs.offset[r];
                range = ctx->irqs.range[r];
                if (irq_off >= 0 && irq_off < range) {
                        afu_irq += irq_off;
                        break;
                }
                afu_irq += range;
        }
        if (unlikely(r >= CXL_IRQ_RANGES)) {
                WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
                     ctx->pe, irq, hwirq);
                return IRQ_HANDLED;
        }

        pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
                 afu_irq, ctx->pe, irq, hwirq);

        if (unlikely(!ctx->irq_bitmap)) {
                WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
                return IRQ_HANDLED;
        }
        spin_lock(&ctx->lock);
        set_bit(afu_irq - 1, ctx->irq_bitmap);
        ctx->pending_irq = true;
        spin_unlock(&ctx->lock);

        wake_up_all(&ctx->wq);

        return IRQ_HANDLED;
}

unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
                         irq_handler_t handler, void *cookie)
{
        unsigned int virq;
        int result;

        /* IRQ Domain? */
        virq = irq_create_mapping(NULL, hwirq);
        if (!virq) {
                dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
                return 0;
        }

        cxl_setup_irq(adapter, hwirq, virq);

        pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

        result = request_irq(virq, handler, 0, "cxl", cookie);
        if (result) {
                dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
                return 0;
        }

        return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
        free_irq(virq, cookie);
        irq_dispose_mapping(virq);
}

static int cxl_register_one_irq(struct cxl *adapter,
                                irq_handler_t handler,
                                void *cookie,
                                irq_hw_number_t *dest_hwirq,
                                unsigned int *dest_virq)
{
        int hwirq, virq;

        if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
                return hwirq;

        if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie)))
                goto err;

        *dest_hwirq = hwirq;
        *dest_virq = virq;

        return 0;

err:
        cxl_release_one_irq(adapter, hwirq);
        return -ENOMEM;
}

int cxl_register_psl_err_irq(struct cxl *adapter)
{
        int rc;

        if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
                                       &adapter->err_hwirq,
                                       &adapter->err_virq)))
                return rc;

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);

        return 0;
}

void cxl_release_psl_err_irq(struct cxl *adapter)
{
        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->err_virq, adapter);
        cxl_release_one_irq(adapter, adapter->err_hwirq);
}

int cxl_register_serr_irq(struct cxl_afu *afu)
{
        u64 serr;
        int rc;

        if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
                                       &afu->serr_hwirq,
                                       &afu->serr_virq)))
                return rc;

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return 0;
}

void cxl_release_serr_irq(struct cxl_afu *afu)
{
        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
}

int cxl_register_psl_irq(struct cxl_afu *afu)
{
        return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
                                    &afu->psl_hwirq, &afu->psl_virq);
}

void cxl_release_psl_irq(struct cxl_afu *afu)
{
        cxl_unmap_irq(afu->psl_virq, afu);
        cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
}

int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
        irq_hw_number_t hwirq;
        int rc, r, i;

        if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
                return rc;

        /* Multiplexed PSL Interrupt */
        ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
        ctx->irqs.range[0] = 1;

        ctx->irq_count = count;
        ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
                                  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
        if (!ctx->irq_bitmap) {
                /* Don't leak the IRQ ranges allocated above */
                cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
                return -ENOMEM;
        }
        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        cxl_map_irq(ctx->afu->adapter, hwirq,
                                    cxl_irq_afu, ctx);
                }
        }

        return 0;
}

void afu_release_irqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        unsigned int virq;
        int r, i;

        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, ctx);
                }
        }

        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}