Diffstat (limited to 'drivers/misc/cxl/api.c')
-rw-r--r--	drivers/misc/cxl/api.c	168
1 file changed, 167 insertions(+), 1 deletion(-)
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 6d228ccd884d..f3d34b941f85 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -13,6 +13,8 @@
 #include <linux/file.h>
 #include <misc/cxl.h>
 #include <linux/fs.h>
+#include <asm/pnv-pci.h>
+#include <linux/msi.h>
 
 #include "cxl.h"
 
@@ -24,6 +26,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
         int rc;
 
         afu = cxl_pci_to_afu(dev);
+        if (IS_ERR(afu))
+                return ERR_CAST(afu);
 
         ctx = cxl_context_alloc();
         if (IS_ERR(ctx)) {
@@ -94,6 +98,42 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
         return 0;
 }
 
+int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
+{
+        if (*ctx == NULL || *afu_irq == 0) {
+                *afu_irq = 1;
+                *ctx = cxl_get_context(pdev);
+        } else {
+                (*afu_irq)++;
+                if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
+                        *ctx = list_next_entry(*ctx, extra_irq_contexts);
+                        *afu_irq = 1;
+                }
+        }
+        return cxl_find_afu_irq(*ctx, *afu_irq);
+}
+/* Exported via cxl_base */
+
+int cxl_set_priv(struct cxl_context *ctx, void *priv)
+{
+        if (!ctx)
+                return -EINVAL;
+
+        ctx->priv = priv;
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_priv);
+
+void *cxl_get_priv(struct cxl_context *ctx)
+{
+        if (!ctx)
+                return ERR_PTR(-EINVAL);
+
+        return ctx->priv;
+}
+EXPORT_SYMBOL_GPL(cxl_get_priv);
+
 int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
 {
         int res;
@@ -102,7 +142,10 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
         if (num == 0)
                 num = ctx->afu->pp_irqs;
         res = afu_allocate_irqs(ctx, num);
-        if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
+        if (res)
+                return res;
+
+        if (!cpu_has_feature(CPU_FTR_HVMODE)) {
                 /* In a guest, the PSL interrupt is not multiplexed. It was
                  * allocated above, and we need to set its handler
                  */
@@ -110,6 +153,13 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
                 if (hwirq)
                         cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
         }
+
+        if (ctx->status == STARTED) {
+                if (cxl_ops->update_ivtes)
+                        cxl_ops->update_ivtes(ctx);
+                else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
+        }
+
         return res;
 }
 EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
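The cxl_set_priv()/cxl_get_priv() pair added above gives an AFU driver somewhere to hang per-context state, while _cxl_next_msi_hwirq() is the iterator the PHB's MSI code uses to walk a device's AFU interrupts (see the setup routine in the final hunk). A minimal sketch of the priv accessors in use — not part of this patch; struct my_afu_data and my_attach() are hypothetical:

    #include <linux/pci.h>
    #include <linux/slab.h>
    #include <misc/cxl.h>

    struct my_afu_data {                    /* hypothetical per-context driver state */
            int id;
    };

    static int my_attach(struct pci_dev *pdev)
    {
            struct cxl_context *ctx = cxl_dev_context_init(pdev);
            struct my_afu_data *data;

            if (IS_ERR(ctx))                /* possible now that init propagates ERR_CAST(afu) */
                    return PTR_ERR(ctx);

            data = kzalloc(sizeof(*data), GFP_KERNEL);
            if (!data) {
                    cxl_release_context(ctx);
                    return -ENOMEM;
            }

            cxl_set_priv(ctx, data);        /* only fails (-EINVAL) for a NULL ctx */
            return 0;
    }

Later code retrieves the pointer with cxl_get_priv(ctx); note it returns ERR_PTR(-EINVAL) rather than NULL for a NULL context, so callers should check with IS_ERR().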
@@ -323,6 +373,23 @@ struct cxl_context *cxl_fops_get_context(struct file *file)
 }
 EXPORT_SYMBOL_GPL(cxl_fops_get_context);
 
+void cxl_set_driver_ops(struct cxl_context *ctx,
+                        struct cxl_afu_driver_ops *ops)
+{
+        WARN_ON(!ops->fetch_event || !ops->event_delivered);
+        atomic_set(&ctx->afu_driver_events, 0);
+        ctx->afu_driver_ops = ops;
+}
+EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
+
+void cxl_context_events_pending(struct cxl_context *ctx,
+                                unsigned int new_events)
+{
+        atomic_add(new_events, &ctx->afu_driver_events);
+        wake_up_all(&ctx->wq);
+}
+EXPORT_SYMBOL_GPL(cxl_context_events_pending);
+
 int cxl_start_work(struct cxl_context *ctx,
                    struct cxl_ioctl_start_work *work)
 {
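cxl_set_driver_ops() and cxl_context_events_pending() are the two halves of the new AFU-driver event path: the driver registers its callbacks once per context, then bumps afu_driver_events whenever it queues an event so anything sleeping in poll/read on ctx->wq is woken. A sketch, assuming struct cxl_afu_driver_ops and struct cxl_event_afu_driver_reserved are as defined in the companion misc/cxl.h change; every my_* name is hypothetical:

    #include <misc/cxl.h>

    static struct cxl_event_afu_driver_reserved my_event;  /* one static event, illustration only */

    static struct cxl_event_afu_driver_reserved *my_fetch_event(struct cxl_context *ctx)
    {
            return &my_event;               /* hand the core the next pending event */
    }

    static void my_event_delivered(struct cxl_context *ctx,
                                   struct cxl_event_afu_driver_reserved *event,
                                   int rc)
    {
            /* rc reports whether the copy to userspace succeeded */
    }

    static struct cxl_afu_driver_ops my_ops = {
            .fetch_event     = my_fetch_event,
            .event_delivered = my_event_delivered,
    };

    void my_arm_events(struct cxl_context *ctx)
    {
            cxl_set_driver_ops(ctx, &my_ops);       /* WARNs if either hook is missing */
            cxl_context_events_pending(ctx, 1);     /* one new event: wake the waiters */
    }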
@@ -390,7 +457,106 @@ EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
 ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
 {
         struct cxl_afu *afu = cxl_pci_to_afu(dev);
+        if (IS_ERR(afu))
+                return -ENODEV;
 
         return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
 }
 EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
+
+int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
+{
+        struct cxl_afu *afu = cxl_pci_to_afu(dev);
+        if (IS_ERR(afu))
+                return -ENODEV;
+
+        if (irqs > afu->adapter->user_irqs)
+                return -EINVAL;
+
+        /* Limit user_irqs to prevent the user increasing this via sysfs */
+        afu->adapter->user_irqs = irqs;
+        afu->irqs_max = irqs;
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);
+
+int cxl_get_max_irqs_per_process(struct pci_dev *dev)
+{
+        struct cxl_afu *afu = cxl_pci_to_afu(dev);
+        if (IS_ERR(afu))
+                return -ENODEV;
+
+        return afu->irqs_max;
+}
+EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);
+
+/*
+ * This is a special interrupt allocation routine called from the PHB's MSI
+ * setup function. When capi interrupts are allocated in this manner they must
+ * still be associated with a running context, but since the MSI APIs have no
+ * way to specify this we use the default context associated with the device.
+ *
+ * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
+ * interrupt number, so in order to overcome this their driver informs us of
+ * the restriction by setting the maximum interrupts per context, and we
+ * allocate additional contexts as necessary so that we can keep the AFU
+ * interrupt number within the supported range.
+ */
+int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+        struct cxl_context *ctx, *new_ctx, *default_ctx;
+        int remaining;
+        int rc;
+
+        ctx = default_ctx = cxl_get_context(pdev);
+        if (WARN_ON(!default_ctx))
+                return -ENODEV;
+
+        remaining = nvec;
+        while (remaining > 0) {
+                rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
+                if (rc) {
+                        pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
+                        return rc;
+                }
+                remaining -= ctx->afu->irqs_max;
+
+                if (ctx != default_ctx && default_ctx->status == STARTED) {
+                        WARN_ON(cxl_start_context(ctx,
+                                be64_to_cpu(default_ctx->elem->common.wed),
+                                NULL));
+                }
+
+                if (remaining > 0) {
+                        new_ctx = cxl_dev_context_init(pdev);
+                        if (!new_ctx) {
+                                pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
+                                return -ENOSPC;
+                        }
+                        list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
+                        ctx = new_ctx;
+                }
+        }
+
+        return 0;
+}
+/* Exported via cxl_base */
+
+void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+        struct cxl_context *ctx, *pos, *tmp;
+
+        ctx = cxl_get_context(pdev);
+        if (WARN_ON(!ctx))
+                return;
+
+        cxl_free_afu_irqs(ctx);
+        list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
+                cxl_stop_context(pos);
+                cxl_free_afu_irqs(pos);
+                list_del(&pos->extra_irq_contexts);
+                cxl_release_context(pos);
+        }
+}
+/* Exported via cxl_base */
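The two routines above pair up with _cxl_next_msi_hwirq() from the first hunk: setup chains any extra contexts onto the default context's extra_irq_contexts list, and the PHB then walks that chain one AFU interrupt at a time. For example, with nvec = 10 and a per-process limit of 4, the setup loop allocates three contexts carrying 4, 4 and 2 interrupts, and the walk visits (ctx0, irq 1..4), (ctx1, 1..4), (ctx2, 1..2). A sketch of that consumer side, not part of this patch — my_walk_msi_hwirqs(), the MSI-programming step, and the header locations are assumptions:

    #include <linux/irqdomain.h>            /* irq_hw_number_t */
    #include <linux/pci.h>
    #include <misc/cxl.h>

    static int my_walk_msi_hwirqs(struct pci_dev *pdev, int nvec)
    {
            struct cxl_context *ctx = NULL; /* NULL ctx / 0 afu_irq restarts the walk */
            int afu_irq = 0;
            irq_hw_number_t hwirq;
            int i;

            for (i = 0; i < nvec; i++) {
                    hwirq = _cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
                    if (!hwirq)             /* cxl_find_afu_irq() returns 0 on failure */
                            return -ENOSPC;
                    /* ... map hwirq to a virq and program the MSI entry ... */
            }
            return 0;
    }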