Diffstat (limited to 'drivers/misc/cxl/file.c')
-rw-r--r--   drivers/misc/cxl/file.c   508
1 file changed, 508 insertions, 0 deletions
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
new file mode 100644
index 000000000000..847b7e646a7e
--- /dev/null
+++ b/drivers/misc/cxl/file.c
@@ -0,0 +1,508 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

#define CXL_NUM_MINORS 256 /* Total to reserve */
#define CXL_DEV_MINORS 13  /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */

#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))

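/*
 * Decode the adapter number and AFU slice back out of a dev_t.  Minor 0 of
 * each CXL_DEV_MINORS block is the card itself; the remaining minors come
 * in groups of three per slice, e.g. adapter 0 / slice 1 maps to minors
 * 4 (dedicated), 5 (master) and 6 (shared).
 */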
#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)

#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)

static dev_t cxl_dev;

static struct class *cxl_class;

static int __afu_open(struct inode *inode, struct file *file, bool master)
{
	struct cxl *adapter;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	int slice = CXL_DEVT_AFU(inode->i_rdev);
	int rc = -ENODEV;

	pr_devel("afu_open afu%i.%i\n", slice, adapter_num);

	if (!(adapter = get_cxl_adapter(adapter_num)))
		return -ENODEV;

	if (slice > adapter->slices)
		goto err_put_adapter;

	spin_lock(&adapter->afu_list_lock);
	if (!(afu = adapter->afu[slice])) {
		spin_unlock(&adapter->afu_list_lock);
		goto err_put_adapter;
	}
	get_device(&afu->dev);
	spin_unlock(&adapter->afu_list_lock);

	if (!afu->current_mode)
		goto err_put_afu;

	if (!(ctx = cxl_context_alloc())) {
		rc = -ENOMEM;
		goto err_put_afu;
	}

	if ((rc = cxl_context_init(ctx, afu, master)))
		goto err_put_afu;

	pr_devel("afu_open pe: %i\n", ctx->pe);
	file->private_data = ctx;
	cxl_ctx_get();

	/* Our ref on the AFU will now hold the adapter */
	put_device(&adapter->dev);

	return 0;

err_put_afu:
	put_device(&afu->dev);
err_put_adapter:
	put_device(&adapter->dev);
	return rc;
}
static int afu_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, false);
}

static int afu_master_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, true);
}

static int afu_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = file->private_data;

	pr_devel("%s: closing cxl file descriptor. pe: %i\n",
		 __func__, ctx->pe);
	cxl_context_detach(ctx);

	put_device(&ctx->afu->dev);

	/*
	 * At this point all bottom halves have finished and we should be
	 * getting no more IRQs from the hardware for this context.  Once it's
	 * removed from the IDR (and RCU synchronised) it's safe to free the
	 * sstp and context.
	 */
	cxl_context_free(ctx);

	cxl_ctx_put();
	return 0;
}

static long afu_ioctl_start_work(struct cxl_context *ctx,
				 struct cxl_ioctl_start_work __user *uwork)
{
	struct cxl_ioctl_start_work work;
	u64 amr = 0;
	int rc;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	if (copy_from_user(&work, uwork,
			   sizeof(struct cxl_ioctl_start_work))) {
		rc = -EFAULT;
		goto out;
	}

	/*
	 * If any of the reserved fields are set, or any of the unused flags
	 * are set, the request is invalid.
	 */
	if (work.reserved1 || work.reserved2 || work.reserved3 ||
	    work.reserved4 || work.reserved5 || work.reserved6 ||
	    (work.flags & ~CXL_START_WORK_ALL)) {
		rc = -EINVAL;
		goto out;
	}

	if (!(work.flags & CXL_START_WORK_NUM_IRQS))
		work.num_interrupts = ctx->afu->pp_irqs;
	else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
		 (work.num_interrupts > ctx->afu->irqs_max)) {
		rc = -EINVAL;
		goto out;
	}
	if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
		goto out;

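	/*
	 * The requested AMR is masked with UAMOR so that only authority mask
	 * bits userspace is actually permitted to modify take effect.
	 */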
	if (work.flags & CXL_START_WORK_AMR)
		amr = work.amr & mfspr(SPRN_UAMOR);

	/*
	 * We grab the PID here and not in the file open to allow for the case
	 * where a process (master, some daemon, etc) has opened the chardev on
	 * behalf of another process, so the AFU's mm gets bound to the process
	 * that performs this ioctl and not the process that opened the file.
	 */
	ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));

	if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
				     amr)))
		goto out;

	ctx->status = STARTED;
	rc = 0;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
static long afu_ioctl_process_element(struct cxl_context *ctx,
				      int __user *upe)
{
	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	if (copy_to_user(upe, &ctx->pe, sizeof(__u32)))
		return -EFAULT;

	return 0;
}

static long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl_context *ctx = file->private_data;

	if (ctx->status == CLOSED)
		return -EIO;

	pr_devel("afu_ioctl\n");
	switch (cmd) {
	case CXL_IOCTL_START_WORK:
		return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
	case CXL_IOCTL_GET_PROCESS_ELEMENT:
		return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
	}
	return -EINVAL;
}

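/*
 * Compat (32-bit) callers are passed straight through: the ioctl payloads
 * use only fixed-size fields, so no structure translation should be needed.
 */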
static long afu_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}

static int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
	struct cxl_context *ctx = file->private_data;

	/* AFU must be started before we can MMIO */
	if (ctx->status != STARTED)
		return -EIO;

	return cxl_context_iomap(ctx, vm);
}

static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct cxl_context *ctx = file->private_data;
	int mask = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wq, poll);

	pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->pending_irq || ctx->pending_fault ||
	    ctx->pending_afu_err)
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->status == CLOSED)
		/* Only error on closed when there are no further events pending */
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->lock, flags);

	pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

	return mask;
}

static inline int ctx_event_pending(struct cxl_context *ctx)
{
	return (ctx->pending_irq || ctx->pending_fault ||
		ctx->pending_afu_err || (ctx->status == CLOSED));
}

static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct cxl_context *ctx = file->private_data;
	struct cxl_event event;
	unsigned long flags;
	DEFINE_WAIT(wait);

	if (count < CXL_READ_MIN_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&ctx->lock, flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
		if (ctx_event_pending(ctx))
			break;

		spin_unlock_irqrestore(&ctx->lock, flags);
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (signal_pending(current))
			return -ERESTARTSYS;

		pr_devel("afu_read going to sleep...\n");
		schedule();
		pr_devel("afu_read woken up\n");
		spin_lock_irqsave(&ctx->lock, flags);
	}

	finish_wait(&ctx->wq, &wait);

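	/*
	 * Deliver one event per read().  AFU interrupts are reported ahead of
	 * data storage faults, which are reported ahead of AFU errors; a
	 * closed context returns -EIO only once nothing else is pending.
	 */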
	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		pr_devel("afu_read delivering AFU interrupt\n");
		event.header.size += sizeof(struct cxl_event_afu_interrupt);
		event.header.type = CXL_EVENT_AFU_INTERRUPT;
		event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
		clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
		if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		pr_devel("afu_read delivering data storage fault\n");
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	} else if (ctx->pending_afu_err) {
		pr_devel("afu_read delivering afu error\n");
		event.header.size += sizeof(struct cxl_event_afu_error);
		event.header.type = CXL_EVENT_AFU_ERROR;
		event.afu_error.error = ctx->afu_err;
		ctx->pending_afu_err = false;
	} else if (ctx->status == CLOSED) {
		pr_devel("afu_read fatal error\n");
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EIO;
	} else
		WARN(1, "afu_read must be buggy\n");

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (copy_to_user(buf, &event, event.header.size))
		return -EFAULT;
	return event.header.size;
}

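/*
 * File operations for the AFU device nodes.  The expected sequence from
 * userspace is open() -> CXL_IOCTL_START_WORK -> optionally mmap() the
 * per-process MMIO area -> poll()/read() for events -> close().
 */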
static const struct file_operations afu_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};

static const struct file_operations afu_master_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_master_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};

static char *cxl_devnode(struct device *dev, umode_t *mode)
{
	if (CXL_DEVT_IS_CARD(dev->devt)) {
		/*
		 * These minor numbers will eventually be used to program the
		 * PSL and AFUs once we have dynamic reprogramming support
		 */
		return NULL;
	}
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

extern struct class *cxl_class;

static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
			   struct device **chardev, char *postfix, char *desc,
			   const struct file_operations *fops)
{
	struct device *dev;
	int rc;

	cdev_init(cdev, fops);
	if ((rc = cdev_add(cdev, devt, 1))) {
		dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
		return rc;
	}

	dev = device_create(cxl_class, &afu->dev, devt, afu,
			    "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
		goto err;
	}

	*chardev = dev;

	return 0;
err:
	cdev_del(cdev);
	return rc;
}

int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
			       &afu->chardev_d, "d", "dedicated",
			       &afu_master_fops); /* Uses master fops */
}

int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
			       &afu->chardev_m, "m", "master",
			       &afu_master_fops);
}

int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
			       &afu->chardev_s, "s", "shared",
			       &afu_fops);
}

void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
	if (afu->chardev_d) {
		cdev_del(&afu->afu_cdev_d);
		device_unregister(afu->chardev_d);
		afu->chardev_d = NULL;
	}
	if (afu->chardev_m) {
		cdev_del(&afu->afu_cdev_m);
		device_unregister(afu->chardev_m);
		afu->chardev_m = NULL;
	}
	if (afu->chardev_s) {
		cdev_del(&afu->afu_cdev_s);
		device_unregister(afu->chardev_s);
		afu->chardev_s = NULL;
	}
}

int cxl_register_afu(struct cxl_afu *afu)
{
	afu->dev.class = cxl_class;

	return device_register(&afu->dev);
}

int cxl_register_adapter(struct cxl *adapter)
{
	adapter->dev.class = cxl_class;

	/*
	 * Future: When we support dynamically reprogramming the PSL & AFU we
	 * will expose the interface to do that via a chardev:
	 * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
	 */

	return device_register(&adapter->dev);
}

int __init cxl_file_init(void)
{
	int rc;

	/*
	 * If these change we really need to update the API.  Either change
	 * some flags or update the API version number CXL_API_VERSION.
	 */
	BUILD_BUG_ON(CXL_API_VERSION != 1);
	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);

	if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
		pr_err("Unable to allocate CXL major number: %i\n", rc);
		return rc;
	}

	pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));

	cxl_class = class_create(THIS_MODULE, "cxl");
	if (IS_ERR(cxl_class)) {
		pr_err("Unable to create CXL class\n");
		rc = PTR_ERR(cxl_class);
		goto err;
	}
	cxl_class->devnode = cxl_devnode;

	return 0;

err:
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	return rc;
}

void cxl_file_exit(void)
{
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	class_destroy(cxl_class);
}
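
For context, here is a minimal user-space sketch of how a process drives this interface. It assumes the uapi definitions that pair with this file (struct cxl_ioctl_start_work, struct cxl_event and the CXL_IOCTL_* numbers, pulled in here as <misc/cxl.h>), a shared-mode AFU exposed as /dev/cxl/afu0.0s by cxl_devnode() above, a page-sized read buffer to satisfy CXL_READ_MIN_SIZE, and a placeholder work element descriptor. It is illustrative only, with no real error reporting.

/* Hedged sketch: attach to a shared-mode cxl AFU and read one event. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/cxl.h>	/* assumed uapi header providing the cxl ioctls/structs */

int main(void)
{
	struct cxl_ioctl_start_work work;
	union {
		struct cxl_event event;
		char pad[4096];	/* read() requires at least CXL_READ_MIN_SIZE */
	} ev;
	uint32_t pe;
	int fd;

	/* Shared ("s") node created by cxl_chardev_s_afu_add()/cxl_devnode() above */
	fd = open("/dev/cxl/afu0.0s", O_RDWR);
	if (fd < 0)
		return 1;

	/* Reserved fields and unused flags must be zero or START_WORK fails */
	memset(&work, 0, sizeof(work));
	work.work_element_descriptor = 0;	/* AFU-specific WED; placeholder value */
	if (ioctl(fd, CXL_IOCTL_START_WORK, &work))
		return 1;

	if (ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe))
		return 1;
	printf("attached, process element %u\n", pe);

	/* Blocks until an AFU interrupt, data storage fault or AFU error arrives */
	if (read(fd, &ev, sizeof(ev)) >= (ssize_t)sizeof(struct cxl_event_header))
		printf("event type %u, size %u\n",
		       ev.event.header.type, ev.event.header.size);

	close(fd);
	return 0;
}

An mmap() of the per-process MMIO area would normally follow CXL_IOCTL_START_WORK, since afu_mmap() above refuses mappings until the context is STARTED.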