author    Frederic Barrat <fbarrat@linux.vnet.ibm.com>   2018-01-23 06:31:41 -0500
committer Michael Ellerman <mpe@ellerman.id.au>          2018-01-23 19:42:58 -0500
commit    5ef3166e8a32d78dfa985a323aa45ed485ff663a (patch)
tree      e1321e75dc2f802294f94d71aff7509057d01077 /drivers/misc/ocxl/context.c
parent    2cb3d64b26984703a6bb80e66adcc3727ad37f9f (diff)
ocxl: Driver code for 'generic' opencapi devices
Add an ocxl driver to handle generic opencapi devices. Of course, it's
not meant to be the only opencapi driver; any device is free to
implement its own. But if a host application only needs basic services
like attaching to an opencapi adapter, having translation faults
handled, or allocating AFU interrupts, it should suffice.

The AFU config space must follow the opencapi specification and use
the expected vendor/device ID to be seen by the generic driver.

The driver exposes the device AFUs as char devices in /dev/ocxl/

Note that the driver currently doesn't handle memory attached to the
opencapi device.

Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Signed-off-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
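As a rough illustration of the intended userspace flow, here is a minimal sketch of opening an AFU and attaching the current process. The device node name is hypothetical, and the ioctl and struct come from the uapi header (misc/ocxl.h) introduced alongside this driver; treat the details as assumptions rather than a reference:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/ocxl.h>  /* uapi header added with this driver series */

int main(void)
{
        /* hypothetical node name; real names depend on the AFU and card */
        int fd = open("/dev/ocxl/IBM,AFU.0004:00:00.1.0", O_RDWR);
        struct ocxl_ioctl_attach attach = { .amr = 0 };

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* binds the process's address space to the context's PASID */
        if (ioctl(fd, OCXL_IOCTL_ATTACH, &attach)) {
                perror("OCXL_IOCTL_ATTACH");
                return 1;
        }
        /* context is now ATTACHED; MMIO can be mmap'ed (see further down) */
        return 0;
}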
Diffstat (limited to 'drivers/misc/ocxl/context.c')
-rw-r--r--  drivers/misc/ocxl/context.c  230
1 file changed, 230 insertions, 0 deletions
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
new file mode 100644
index 000000000000..b34b836f924c
--- /dev/null
+++ b/drivers/misc/ocxl/context.c
@@ -0,0 +1,230 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include "ocxl_internal.h"

struct ocxl_context *ocxl_context_alloc(void)
{
        return kzalloc(sizeof(struct ocxl_context), GFP_KERNEL);
}

int ocxl_context_init(struct ocxl_context *ctx, struct ocxl_afu *afu,
                struct address_space *mapping)
{
        int pasid;

        ctx->afu = afu;
        mutex_lock(&afu->contexts_lock);
        pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base,
                        afu->pasid_base + afu->pasid_max, GFP_KERNEL);
        if (pasid < 0) {
                mutex_unlock(&afu->contexts_lock);
                return pasid;
        }
        afu->pasid_count++;
        mutex_unlock(&afu->contexts_lock);

        ctx->pasid = pasid;
        ctx->status = OPENED;
        mutex_init(&ctx->status_mutex);
        ctx->mapping = mapping;
        mutex_init(&ctx->mapping_lock);
        init_waitqueue_head(&ctx->events_wq);
        mutex_init(&ctx->xsl_error_lock);
        /*
         * Keep a reference on the AFU to make sure it's valid for the
         * duration of the life of the context
         */
        ocxl_afu_get(afu);
        return 0;
}

/*
 * Callback for when a translation fault triggers an error
 * data: a pointer to the context which triggered the fault
 * addr: the address that triggered the error
 * dsisr: the value of the PPC64 dsisr register
 */
static void xsl_fault_error(void *data, u64 addr, u64 dsisr)
{
        struct ocxl_context *ctx = (struct ocxl_context *) data;

        mutex_lock(&ctx->xsl_error_lock);
        ctx->xsl_error.addr = addr;
        ctx->xsl_error.dsisr = dsisr;
        ctx->xsl_error.count++;
        mutex_unlock(&ctx->xsl_error_lock);

        wake_up_all(&ctx->events_wq);
}
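The error recorded here is surfaced to userspace through the context's file descriptor; the read/poll plumbing lives in file.c from the same series. A hedged sketch of consuming it, assuming the event layout in the series' uapi header (a struct ocxl_kernel_event_header followed by a struct ocxl_kernel_event_xsl_fault_error) — both names are assumptions about that header:

#include <poll.h>
#include <unistd.h>
#include <misc/ocxl.h>

/* Wait for and decode one translation-fault error event on an attached fd. */
static void drain_xsl_error(int fd)
{
        char buf[sizeof(struct ocxl_kernel_event_header) +
                 sizeof(struct ocxl_kernel_event_xsl_fault_error)];
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        if (poll(&pfd, 1, -1) > 0 && read(fd, buf, sizeof(buf)) > 0) {
                struct ocxl_kernel_event_header *hdr = (void *)buf;

                if (hdr->type == OCXL_AFU_EVENT_XSL_FAULT_ERROR) {
                        struct ocxl_kernel_event_xsl_fault_error *err =
                                (void *)(buf + sizeof(*hdr));
                        /* err->addr and err->dsisr mirror ctx->xsl_error */
                        (void)err;
                }
        }
}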

int ocxl_context_attach(struct ocxl_context *ctx, u64 amr)
{
        int rc;

        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
                rc = -EIO;
                goto out;
        }

        rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid,
                        current->mm->context.id, 0, amr, current->mm,
                        xsl_fault_error, ctx);
        if (rc)
                goto out;

        ctx->status = ATTACHED;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}

static int map_pp_mmio(struct vm_area_struct *vma, unsigned long address,
                u64 offset, struct ocxl_context *ctx)
{
        u64 pp_mmio_addr;
        int pasid_off;

        if (offset >= ctx->afu->config.pp_mmio_stride)
                return VM_FAULT_SIGBUS;

        mutex_lock(&ctx->status_mutex);
        if (ctx->status != ATTACHED) {
                mutex_unlock(&ctx->status_mutex);
                pr_debug("%s: Context not attached, failing mmio mmap\n",
                        __func__);
                return VM_FAULT_SIGBUS;
        }

        pasid_off = ctx->pasid - ctx->afu->pasid_base;
        pp_mmio_addr = ctx->afu->pp_mmio_start +
                pasid_off * ctx->afu->config.pp_mmio_stride +
                offset;

        vm_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT);
        mutex_unlock(&ctx->status_mutex);
        return VM_FAULT_NOPAGE;
}

static int ocxl_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ocxl_context *ctx = vma->vm_file->private_data;
        u64 offset;
        int rc;

        offset = vmf->pgoff << PAGE_SHIFT;
        pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__,
                ctx->pasid, vmf->address, offset);

        rc = map_pp_mmio(vma, vmf->address, offset, ctx);
        return rc;
}

static const struct vm_operations_struct ocxl_vmops = {
        .fault = ocxl_mmap_fault,
};

static int check_mmap_mmio(struct ocxl_context *ctx,
                struct vm_area_struct *vma)
{
        if ((vma_pages(vma) + vma->vm_pgoff) >
                (ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT))
                return -EINVAL;
        return 0;
}

int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
{
        int rc;

        rc = check_mmap_mmio(ctx, vma);
        if (rc)
                return rc;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxl_vmops;
        return 0;
}
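From userspace, the per-process MMIO area is mapped at offset 0 of the context's fd; check_mmap_mmio() rejects requests larger than pp_mmio_stride, and faults past the stride return SIGBUS. A minimal sketch, assuming an already-attached fd and a caller that knows the stride (how it is discovered is out of scope here):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Map this context's per-process MMIO registers. The kernel side maps
 * them non-cached (pgprot_noncached) and faults pages in lazily; the fd
 * must already be ATTACHED, or the first access raises SIGBUS via
 * map_pp_mmio(). */
static volatile uint64_t *map_pp_mmio_user(int fd, size_t stride)
{
        void *p = mmap(NULL, stride, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);

        return p == MAP_FAILED ? NULL : (volatile uint64_t *)p;
}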

int ocxl_context_detach(struct ocxl_context *ctx)
{
        struct pci_dev *dev;
        int afu_control_pos;
        enum ocxl_context_status status;
        int rc;

        mutex_lock(&ctx->status_mutex);
        status = ctx->status;
        ctx->status = CLOSED;
        mutex_unlock(&ctx->status_mutex);
        if (status != ATTACHED)
                return 0;

        dev = to_pci_dev(ctx->afu->fn->dev.parent);
        afu_control_pos = ctx->afu->config.dvsec_afu_control_pos;

        mutex_lock(&ctx->afu->afu_control_lock);
        rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid);
        mutex_unlock(&ctx->afu->afu_control_lock);
        if (rc) {
                /*
                 * If we timeout waiting for the AFU to terminate the
                 * pasid, then it's dangerous to clean up the Process
                 * Element entry in the SPA, as it may be referenced
                 * in the future by the AFU. In which case, we would
                 * checkstop because of an invalid PE access (FIR
                 * register 2, bit 42). So leave the PE
                 * defined. Caller shouldn't free the context so that
                 * PASID remains allocated.
                 *
                 * A link reset will be required to cleanup the AFU
                 * and the SPA.
                 */
                if (rc == -EBUSY)
                        return rc;
        }
        rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid);
        if (rc) {
                dev_warn(&ctx->afu->dev,
                        "Couldn't remove PE entry cleanly: %d\n", rc);
        }
        return 0;
}

void ocxl_context_detach_all(struct ocxl_afu *afu)
{
        struct ocxl_context *ctx;
        int tmp;

        mutex_lock(&afu->contexts_lock);
        idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
                ocxl_context_detach(ctx);
                /*
                 * We are force detaching - remove any active mmio
                 * mappings so userspace cannot interfere with the
                 * card if it comes back. Easiest way to exercise
                 * this is to unbind and rebind the driver via sysfs
                 * while it is in use.
                 */
                mutex_lock(&ctx->mapping_lock);
                if (ctx->mapping)
                        unmap_mapping_range(ctx->mapping, 0, 0, 1);
                mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
}
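The comment above suggests exercising the forced unmap by unbinding and rebinding the driver through sysfs while a mapping is live. A hypothetical check (driver name, sysfs path, and device address are assumptions): once the unbind runs, the zapped mapping makes the next access fault, and map_pp_mmio() turns it into SIGBUS because the context is no longer ATTACHED:

#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>

static sigjmp_buf env;

static void on_sigbus(int sig)
{
        siglongjmp(env, 1);
}

/* mmio: pointer from an earlier mmap() of the context fd. While this
 * loop spins, run (assumed path and device address):
 *   echo 0004:00:00.1 > /sys/bus/pci/drivers/ocxl/unbind */
static void poke_until_zapped(volatile uint64_t *mmio)
{
        signal(SIGBUS, on_sigbus);
        if (sigsetjmp(env, 1)) {
                printf("mapping zapped by ocxl_context_detach_all()\n");
                return;
        }
        for (;;)
                (void)*mmio;  /* faults with SIGBUS once unbound */
}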

void ocxl_context_free(struct ocxl_context *ctx)
{
        mutex_lock(&ctx->afu->contexts_lock);
        ctx->afu->pasid_count--;
        idr_remove(&ctx->afu->contexts_idr, ctx->pasid);
        mutex_unlock(&ctx->afu->contexts_lock);

        /* reference to the AFU taken in ocxl_context_init */
        ocxl_afu_put(ctx->afu);
        kfree(ctx);
}