path: root/drivers/misc/cxl/api.c
Diffstat (limited to 'drivers/misc/cxl/api.c')
-rw-r--r--	drivers/misc/cxl/api.c	168
1 file changed, 167 insertions(+), 1 deletion(-)
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 6d228ccd884d..f3d34b941f85 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -13,6 +13,8 @@
 #include <linux/file.h>
 #include <misc/cxl.h>
 #include <linux/fs.h>
+#include <asm/pnv-pci.h>
+#include <linux/msi.h>
 
 #include "cxl.h"
 
@@ -24,6 +26,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 	int rc;
 
 	afu = cxl_pci_to_afu(dev);
+	if (IS_ERR(afu))
+		return ERR_CAST(afu);
 
 	ctx = cxl_context_alloc();
 	if (IS_ERR(ctx)) {
@@ -94,6 +98,42 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
 	return 0;
 }
 
+int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
+{
+	if (*ctx == NULL || *afu_irq == 0) {
+		*afu_irq = 1;
+		*ctx = cxl_get_context(pdev);
+	} else {
+		(*afu_irq)++;
+		if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
+			*ctx = list_next_entry(*ctx, extra_irq_contexts);
+			*afu_irq = 1;
+		}
+	}
+	return cxl_find_afu_irq(*ctx, *afu_irq);
+}
+/* Exported via cxl_base */
+
+int cxl_set_priv(struct cxl_context *ctx, void *priv)
+{
+	if (!ctx)
+		return -EINVAL;
+
+	ctx->priv = priv;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_priv);
+
+void *cxl_get_priv(struct cxl_context *ctx)
+{
+	if (!ctx)
+		return ERR_PTR(-EINVAL);
+
+	return ctx->priv;
+}
+EXPORT_SYMBOL_GPL(cxl_get_priv);
+
 int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
 {
 	int res;
@@ -102,7 +142,10 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
 	if (num == 0)
 		num = ctx->afu->pp_irqs;
 	res = afu_allocate_irqs(ctx, num);
-	if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
+	if (res)
+		return res;
+
+	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
 		/* In a guest, the PSL interrupt is not multiplexed. It was
 		 * allocated above, and we need to set its handler
 		 */
@@ -110,6 +153,13 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
 		if (hwirq)
 			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
 	}
+
+	if (ctx->status == STARTED) {
+		if (cxl_ops->update_ivtes)
+			cxl_ops->update_ivtes(ctx);
+		else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
+	}
+
 	return res;
 }
 EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
@@ -323,6 +373,23 @@ struct cxl_context *cxl_fops_get_context(struct file *file)
 }
 EXPORT_SYMBOL_GPL(cxl_fops_get_context);
 
+void cxl_set_driver_ops(struct cxl_context *ctx,
+			struct cxl_afu_driver_ops *ops)
+{
+	WARN_ON(!ops->fetch_event || !ops->event_delivered);
+	atomic_set(&ctx->afu_driver_events, 0);
+	ctx->afu_driver_ops = ops;
+}
+EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
+
+void cxl_context_events_pending(struct cxl_context *ctx,
+				unsigned int new_events)
+{
+	atomic_add(new_events, &ctx->afu_driver_events);
+	wake_up_all(&ctx->wq);
+}
+EXPORT_SYMBOL_GPL(cxl_context_events_pending);
+
 int cxl_start_work(struct cxl_context *ctx,
 		   struct cxl_ioctl_start_work *work)
 {
@@ -390,7 +457,106 @@ EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
 ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
 {
 	struct cxl_afu *afu = cxl_pci_to_afu(dev);
+	if (IS_ERR(afu))
+		return -ENODEV;
 
 	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
 }
 EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
+
+int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
+{
+	struct cxl_afu *afu = cxl_pci_to_afu(dev);
+	if (IS_ERR(afu))
+		return -ENODEV;
+
+	if (irqs > afu->adapter->user_irqs)
+		return -EINVAL;
+
+	/* Limit user_irqs to prevent the user increasing this via sysfs */
+	afu->adapter->user_irqs = irqs;
+	afu->irqs_max = irqs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);
+
+int cxl_get_max_irqs_per_process(struct pci_dev *dev)
+{
+	struct cxl_afu *afu = cxl_pci_to_afu(dev);
+	if (IS_ERR(afu))
+		return -ENODEV;
+
+	return afu->irqs_max;
+}
+EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);
+
+/*
+ * This is a special interrupt allocation routine called from the PHB's MSI
+ * setup function. When capi interrupts are allocated in this manner they must
+ * still be associated with a running context, but since the MSI APIs have no
+ * way to specify this we use the default context associated with the device.
+ *
+ * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
+ * interrupt number, so in order to overcome this their driver informs us of
+ * the restriction by setting the maximum interrupts per context, and we
+ * allocate additional contexts as necessary so that we can keep the AFU
+ * interrupt number within the supported range.
+ */
+int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+	struct cxl_context *ctx, *new_ctx, *default_ctx;
+	int remaining;
+	int rc;
+
+	ctx = default_ctx = cxl_get_context(pdev);
+	if (WARN_ON(!default_ctx))
+		return -ENODEV;
+
+	remaining = nvec;
+	while (remaining > 0) {
+		rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
+		if (rc) {
+			pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
+			return rc;
+		}
+		remaining -= ctx->afu->irqs_max;
+
+		if (ctx != default_ctx && default_ctx->status == STARTED) {
+			WARN_ON(cxl_start_context(ctx,
+				be64_to_cpu(default_ctx->elem->common.wed),
+				NULL));
+		}
+
+		if (remaining > 0) {
+			new_ctx = cxl_dev_context_init(pdev);
+			if (!new_ctx) {
+				pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
+				return -ENOSPC;
+			}
+			list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
+			ctx = new_ctx;
+		}
+	}
+
+	return 0;
+}
+/* Exported via cxl_base */
+
+void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+	struct cxl_context *ctx, *pos, *tmp;
+
+	ctx = cxl_get_context(pdev);
+	if (WARN_ON(!ctx))
+		return;
+
+	cxl_free_afu_irqs(ctx);
+	list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
+		cxl_stop_context(pos);
+		cxl_free_afu_irqs(pos);
+		list_del(&pos->extra_irq_contexts);
+		cxl_release_context(pos);
+	}
+}
+/* Exported via cxl_base */
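
The following is a minimal, hypothetical sketch (not part of the patch) of how a driver for a CAPI-attached PCI function might consume the new exports above. The driver structure, function names and the example limit of 4 interrupts are invented for illustration; only the cxl_* calls and their signatures are taken from this diff (plus cxl_release_context(), which the teardown path above already uses).

/* Hypothetical consumer sketch -- names other than the cxl_* calls are
 * made up for illustration. */
#include <linux/pci.h>
#include <linux/err.h>
#include <misc/cxl.h>

struct my_afu {			/* invented per-device driver state */
	struct cxl_context *ctx;
	int irq_count;
};

static int my_afu_setup(struct pci_dev *pdev, struct my_afu *afu)
{
	int rc;

	/* Cap AFU interrupts per context before any are allocated;
	 * 4 is an arbitrary example value. */
	rc = cxl_set_max_irqs_per_process(pdev, 4);
	if (rc)
		return rc;

	/* cxl_dev_context_init() now reports failure via ERR_PTR. */
	afu->ctx = cxl_dev_context_init(pdev);
	if (IS_ERR(afu->ctx))
		return PTR_ERR(afu->ctx);

	/* Stash driver state on the context; cxl_get_priv() returns it later. */
	rc = cxl_set_priv(afu->ctx, afu);
	if (rc)
		goto out_release;

	afu->irq_count = cxl_get_max_irqs_per_process(pdev);
	rc = cxl_allocate_afu_irqs(afu->ctx, afu->irq_count);
	if (rc)
		goto out_release;

	return 0;

out_release:
	cxl_release_context(afu->ctx);
	return rc;
}

/* When the AFU raises driver-specific events, a single call wakes any
 * waiters polling the context's file descriptor. */
static void my_afu_notify_events(struct cxl_context *ctx, unsigned int n)
{
	cxl_context_events_pending(ctx, n);
}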
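
A second sketch, also hypothetical: how the PHB-side MSI setup code might walk _cxl_next_msi_hwirq(). The loop below only illustrates the cursor behaviour described in the comment block above (advance through the default context, then the extra contexts chained by _cxl_cx4_setup_msi_irqs()); the real powernv PHB wiring is not shown, and treating a non-positive return as "no backing interrupt" is an inference from cxl_find_afu_irq() returning 0 above.

/* Hypothetical PHB-side loop (illustration only). */
static void my_phb_map_cxl_msis(struct pci_dev *pdev, int nvec)
{
	struct cxl_context *ctx = NULL;	/* cursor: current context ... */
	int afu_irq = 0;		/* ... and AFU irq within it */
	int hwirq, i;

	for (i = 0; i < nvec; i++) {
		/* Advances the cursor, stepping to the next chained
		 * context once afu_irq exceeds the per-process limit. */
		hwirq = _cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (hwirq <= 0)
			break;
		/* ... map the i-th MSI virq onto hwirq here ... */
	}
}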