author     Jiri Kosina <jkosina@suse.cz>  2015-09-01 09:35:24 -0400
committer  Jiri Kosina <jkosina@suse.cz>  2015-09-01 09:35:24 -0400
commit     067e2601d3c076abbf45db91261f9065eaa879b2 (patch)
tree       86c8d4b913873dbd3b4ff23562a3a8597984b4df /drivers/misc/cxl
parent     3e097d1271ecdff2f251a54ddfc5eaa1f9821e96 (diff)
parent     931830aa5c251e0803523213428f777a48bde254 (diff)
Merge branch 'for-4.3/gembird' into for-linus
Diffstat (limited to 'drivers/misc/cxl')
-rw-r--r--  drivers/misc/cxl/Kconfig   |   5
-rw-r--r--  drivers/misc/cxl/Makefile  |   4
-rw-r--r--  drivers/misc/cxl/api.c     | 329
-rw-r--r--  drivers/misc/cxl/base.c    |   2
-rw-r--r--  drivers/misc/cxl/context.c |  50
-rw-r--r--  drivers/misc/cxl/cxl.h     |  38
-rw-r--r--  drivers/misc/cxl/fault.c   |  34
-rw-r--r--  drivers/misc/cxl/file.c    |  48
-rw-r--r--  drivers/misc/cxl/irq.c     |  37
-rw-r--r--  drivers/misc/cxl/main.c    |   4
-rw-r--r--  drivers/misc/cxl/native.c  |  83
-rw-r--r--  drivers/misc/cxl/pci.c     | 131
-rw-r--r--  drivers/misc/cxl/sysfs.c   |  35
-rw-r--r--  drivers/misc/cxl/vphb.c    | 271
14 files changed, 946 insertions(+), 125 deletions(-)
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index a990b39b4dfb..b6db9ebd52c2 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -7,10 +7,15 @@ config CXL_BASE
         default n
         select PPC_COPRO_BASE
 
+config CXL_KERNEL_API
+        bool
+        default n
+
 config CXL
         tristate "Support for IBM Coherent Accelerators (CXL)"
         depends on PPC_POWERNV && PCI_MSI
         select CXL_BASE
+        select CXL_KERNEL_API
         default m
         help
           Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index edb494d3ff27..14e3f8219a11 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,4 +1,6 @@
-cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o trace.o
+cxl-y += main.o file.o irq.o fault.o native.o
+cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
+cxl-y += vphb.o api.o
 obj-$(CONFIG_CXL) += cxl.o
 obj-$(CONFIG_CXL_BASE) += base.o
 
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
new file mode 100644
index 000000000000..729e0851167d
--- /dev/null
+++ b/drivers/misc/cxl/api.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <misc/cxl.h>
+
+#include "cxl.h"
+
+struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
+{
+        struct cxl_afu *afu;
+        struct cxl_context *ctx;
+        int rc;
+
+        afu = cxl_pci_to_afu(dev);
+
+        get_device(&afu->dev);
+        ctx = cxl_context_alloc();
+        if (IS_ERR(ctx))
+                return ctx;
+
+        /* Make it a slave context. We can promote it later? */
+        rc = cxl_context_init(ctx, afu, false, NULL);
+        if (rc) {
+                kfree(ctx);
+                put_device(&afu->dev);
+                return ERR_PTR(-ENOMEM);
+        }
+        cxl_assign_psn_space(ctx);
+
+        return ctx;
+}
+EXPORT_SYMBOL_GPL(cxl_dev_context_init);
+
+struct cxl_context *cxl_get_context(struct pci_dev *dev)
+{
+        return dev->dev.archdata.cxl_ctx;
+}
+EXPORT_SYMBOL_GPL(cxl_get_context);
+
+struct device *cxl_get_phys_dev(struct pci_dev *dev)
+{
+        struct cxl_afu *afu;
+
+        afu = cxl_pci_to_afu(dev);
+
+        return afu->adapter->dev.parent;
+}
+EXPORT_SYMBOL_GPL(cxl_get_phys_dev);
+
+int cxl_release_context(struct cxl_context *ctx)
+{
+        if (ctx->status != CLOSED)
+                return -EBUSY;
+
+        put_device(&ctx->afu->dev);
+
+        cxl_context_free(ctx);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_release_context);
+
+int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
+{
+        if (num == 0)
+                num = ctx->afu->pp_irqs;
+        return afu_allocate_irqs(ctx, num);
+}
+EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
+
+void cxl_free_afu_irqs(struct cxl_context *ctx)
+{
+        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+}
+EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
+
+static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
+{
+        __u16 range;
+        int r;
+
+        WARN_ON(num == 0);
+
+        for (r = 0; r < CXL_IRQ_RANGES; r++) {
+                range = ctx->irqs.range[r];
+                if (num < range)
+                        return ctx->irqs.offset[r] + num;
+                num -= range;
+        }
+        return 0;
+}
+
+int cxl_map_afu_irq(struct cxl_context *ctx, int num,
+                    irq_handler_t handler, void *cookie, char *name)
+{
+        irq_hw_number_t hwirq;
+
+        /*
+         * Find interrupt we are to register.
+         */
+        hwirq = cxl_find_afu_irq(ctx, num);
+        if (!hwirq)
+                return -ENOENT;
+
+        return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
+}
+EXPORT_SYMBOL_GPL(cxl_map_afu_irq);
+
+void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
+{
+        irq_hw_number_t hwirq;
+        unsigned int virq;
+
+        hwirq = cxl_find_afu_irq(ctx, num);
+        if (!hwirq)
+                return;
+
+        virq = irq_find_mapping(NULL, hwirq);
+        if (virq)
+                cxl_unmap_irq(virq, cookie);
+}
+EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
+
+/*
+ * Start a context.
+ * Code here is similar to afu_ioctl_start_work().
+ */
+int cxl_start_context(struct cxl_context *ctx, u64 wed,
+                      struct task_struct *task)
+{
+        int rc = 0;
+        bool kernel = true;
+
+        pr_devel("%s: pe: %i\n", __func__, ctx->pe);
+
+        mutex_lock(&ctx->status_mutex);
+        if (ctx->status == STARTED)
+                goto out; /* already started */
+
+        if (task) {
+                ctx->pid = get_task_pid(task, PIDTYPE_PID);
+                get_pid(ctx->pid);
+                kernel = false;
+        }
+
+        cxl_ctx_get();
+
+        if ((rc = cxl_attach_process(ctx, kernel, wed, 0))) {
+                put_pid(ctx->pid);
+                cxl_ctx_put();
+                goto out;
+        }
+
+        ctx->status = STARTED;
+out:
+        mutex_unlock(&ctx->status_mutex);
+        return rc;
+}
+EXPORT_SYMBOL_GPL(cxl_start_context);
+
+int cxl_process_element(struct cxl_context *ctx)
+{
+        return ctx->pe;
+}
+EXPORT_SYMBOL_GPL(cxl_process_element);
+
+/* Stop a context. Returns 0 on success, otherwise -Errno */
+int cxl_stop_context(struct cxl_context *ctx)
+{
+        return __detach_context(ctx);
+}
+EXPORT_SYMBOL_GPL(cxl_stop_context);
+
+void cxl_set_master(struct cxl_context *ctx)
+{
+        ctx->master = true;
+        cxl_assign_psn_space(ctx);
+}
+EXPORT_SYMBOL_GPL(cxl_set_master);
+
+/* wrappers around afu_* file ops which are EXPORTED */
+int cxl_fd_open(struct inode *inode, struct file *file)
+{
+        return afu_open(inode, file);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_open);
+int cxl_fd_release(struct inode *inode, struct file *file)
+{
+        return afu_release(inode, file);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_release);
+long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+        return afu_ioctl(file, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
+int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
+{
+        return afu_mmap(file, vm);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_mmap);
+unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
+{
+        return afu_poll(file, poll);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_poll);
+ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
+                    loff_t *off)
+{
+        return afu_read(file, buf, count, off);
+}
+EXPORT_SYMBOL_GPL(cxl_fd_read);
+
+#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME
+
+/* Get a struct file and fd for a context and attach the ops */
+struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
+                        int *fd)
+{
+        struct file *file;
+        int rc, flags, fdtmp;
+
+        flags = O_RDWR | O_CLOEXEC;
+
+        /* This code is similar to anon_inode_getfd() */
+        rc = get_unused_fd_flags(flags);
+        if (rc < 0)
+                return ERR_PTR(rc);
+        fdtmp = rc;
+
+        /*
+         * Patch the file ops. Needs to be careful that this is reentrant-safe.
+         */
+        if (fops) {
+                PATCH_FOPS(open);
+                PATCH_FOPS(poll);
+                PATCH_FOPS(read);
+                PATCH_FOPS(release);
+                PATCH_FOPS(unlocked_ioctl);
+                PATCH_FOPS(compat_ioctl);
+                PATCH_FOPS(mmap);
+        } else /* use default ops */
+                fops = (struct file_operations *)&afu_fops;
+
+        file = anon_inode_getfile("cxl", fops, ctx, flags);
+        if (IS_ERR(file))
+                put_unused_fd(fdtmp);
+        *fd = fdtmp;
+        return file;
+}
+EXPORT_SYMBOL_GPL(cxl_get_fd);
+
+struct cxl_context *cxl_fops_get_context(struct file *file)
+{
+        return file->private_data;
+}
+EXPORT_SYMBOL_GPL(cxl_fops_get_context);
+
+int cxl_start_work(struct cxl_context *ctx,
+                   struct cxl_ioctl_start_work *work)
+{
+        int rc;
+
+        /* code taken from afu_ioctl_start_work */
+        if (!(work->flags & CXL_START_WORK_NUM_IRQS))
+                work->num_interrupts = ctx->afu->pp_irqs;
+        else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
+                 (work->num_interrupts > ctx->afu->irqs_max)) {
+                return -EINVAL;
+        }
+
+        rc = afu_register_irqs(ctx, work->num_interrupts);
+        if (rc)
+                return rc;
+
+        rc = cxl_start_context(ctx, work->work_element_descriptor, current);
+        if (rc < 0) {
+                afu_release_irqs(ctx, ctx);
+                return rc;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_start_work);
+
+void __iomem *cxl_psa_map(struct cxl_context *ctx)
+{
+        struct cxl_afu *afu = ctx->afu;
+        int rc;
+
+        rc = cxl_afu_check_and_enable(afu);
+        if (rc)
+                return NULL;
+
+        pr_devel("%s: psn_phys:%llx size:%llx\n",
+                 __func__, afu->psn_phys, afu->adapter->ps_size);
+        return ioremap(ctx->psn_phys, ctx->psn_size);
+}
+EXPORT_SYMBOL_GPL(cxl_psa_map);
+
+void cxl_psa_unmap(void __iomem *addr)
+{
+        iounmap(addr);
+}
+EXPORT_SYMBOL_GPL(cxl_psa_unmap);
+
+int cxl_afu_reset(struct cxl_context *ctx)
+{
+        struct cxl_afu *afu = ctx->afu;
+        int rc;
+
+        rc = __cxl_afu_reset(afu);
+        if (rc)
+                return rc;
+
+        return cxl_afu_check_and_enable(afu);
+}
+EXPORT_SYMBOL_GPL(cxl_afu_reset);
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index 0654ad83675e..a9f0dd3255a2 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <asm/errno.h>
-#include <misc/cxl.h>
+#include <misc/cxl-base.h>
 #include "cxl.h"
 
 /* protected by rcu */
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index d1b55fe62817..1287148629c0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
         if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
                 area = ctx->afu->psn_phys;
-                if (offset > ctx->afu->adapter->ps_size)
+                if (offset >= ctx->afu->adapter->ps_size)
                         return VM_FAULT_SIGBUS;
         } else {
                 area = ctx->psn_phys;
-                if (offset > ctx->psn_size)
+                if (offset >= ctx->psn_size)
                         return VM_FAULT_SIGBUS;
         }
 
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
  */
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 {
+        u64 start = vma->vm_pgoff << PAGE_SHIFT;
         u64 len = vma->vm_end - vma->vm_start;
-        len = min(len, ctx->psn_size);
+
+        if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+                if (start + len > ctx->afu->adapter->ps_size)
+                        return -EINVAL;
+        } else {
+                if (start + len > ctx->psn_size)
+                        return -EINVAL;
+        }
 
         if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
                 /* make sure there is a valid per process space for this AFU */
@@ -174,7 +182,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
  * return until all outstanding interrupts for this context have completed. The
  * hardware should no longer access *ctx after this has returned.
  */
-static void __detach_context(struct cxl_context *ctx)
+int __detach_context(struct cxl_context *ctx)
 {
         enum cxl_context_status status;
 
@@ -183,12 +191,13 @@ static void __detach_context(struct cxl_context *ctx)
         ctx->status = CLOSED;
         mutex_unlock(&ctx->status_mutex);
         if (status != STARTED)
-                return;
+                return -EBUSY;
 
         WARN_ON(cxl_detach_process(ctx));
-        afu_release_irqs(ctx);
         flush_work(&ctx->fault_work); /* Only needed for dedicated process */
-        wake_up_all(&ctx->wq);
+        put_pid(ctx->pid);
+        cxl_ctx_put();
+        return 0;
 }
 
 /*
@@ -199,7 +208,14 @@ static void __detach_context(struct cxl_context *ctx)
  */
 void cxl_context_detach(struct cxl_context *ctx)
 {
-        __detach_context(ctx);
+        int rc;
+
+        rc = __detach_context(ctx);
+        if (rc)
+                return;
+
+        afu_release_irqs(ctx, ctx);
+        wake_up_all(&ctx->wq);
 }
 
 /*
@@ -216,7 +232,7 @@ void cxl_context_detach_all(struct cxl_afu *afu)
          * Anything done in here needs to be setup before the IDR is
          * created and torn down after the IDR removed
          */
-        __detach_context(ctx);
+        cxl_context_detach(ctx);
 
         /*
          * We are force detaching - remove any active PSA mappings so
@@ -232,16 +248,20 @@ void cxl_context_detach_all(struct cxl_afu *afu)
         mutex_unlock(&afu->contexts_lock);
 }
 
-void cxl_context_free(struct cxl_context *ctx)
+static void reclaim_ctx(struct rcu_head *rcu)
 {
-        mutex_lock(&ctx->afu->contexts_lock);
-        idr_remove(&ctx->afu->contexts_idr, ctx->pe);
-        mutex_unlock(&ctx->afu->contexts_lock);
-        synchronize_rcu();
+        struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
         free_page((u64)ctx->sstp);
         ctx->sstp = NULL;
 
-        put_pid(ctx->pid);
         kfree(ctx);
 }
+
+void cxl_context_free(struct cxl_context *ctx)
+{
+        mutex_lock(&ctx->afu->contexts_lock);
+        idr_remove(&ctx->afu->contexts_idr, ctx->pe);
+        mutex_unlock(&ctx->afu->contexts_lock);
+        call_rcu(&ctx->rcu, reclaim_ctx);
+}
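
The cxl_context_free() rework above replaces a blocking synchronize_rcu() with call_rcu(), which is safe because lookups walk the contexts IDR inside RCU read-side critical sections (see the cxl_slbia_core() hunk in main.c further down). A minimal sketch of such a reader follows; poke_contexts() and the wake-up body are hypothetical stand-ins for whatever per-context work a real reader does.

#include "cxl.h"

static void poke_contexts(struct cxl_afu *afu)
{
        struct cxl_context *ctx;
        int id;

        rcu_read_lock();
        idr_for_each_entry(&afu->contexts_idr, ctx, id)
                wake_up_all(&ctx->wq); /* ctx cannot be reclaimed inside the read section */
        rcu_read_unlock();
}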
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a1cee4767ec6..4fd66cabde1e 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -18,10 +18,11 @@
 #include <linux/pid.h>
 #include <linux/io.h>
 #include <linux/pci.h>
+#include <linux/fs.h>
 #include <asm/cputable.h>
 #include <asm/mmu.h>
 #include <asm/reg.h>
-#include <misc/cxl.h>
+#include <misc/cxl-base.h>
 
 #include <uapi/misc/cxl.h>
 
@@ -315,8 +316,6 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
 #define CXL_MAX_SLICES 4
 #define MAX_AFU_MMIO_REGS 3
 
-#define CXL_MODE_DEDICATED   0x1
-#define CXL_MODE_DIRECTED    0x2
 #define CXL_MODE_TIME_SLICED 0x4
 #define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED)
 
@@ -362,6 +361,10 @@ struct cxl_afu {
         struct mutex spa_mutex;
         spinlock_t afu_cntl_lock;
 
+        /* AFU error buffer fields and bin attribute for sysfs */
+        u64 eb_len, eb_offset;
+        struct bin_attribute attr_eb;
+
         /*
          * Only the first part of the SPA is used for the process element
          * linked list. The only other part that software needs to worry about
@@ -375,6 +378,9 @@ struct cxl_afu {
         int spa_max_procs;
         unsigned int psl_virq;
 
+        /* pointer to the vphb */
+        struct pci_controller *phb;
+
         int pp_irqs;
         int irqs_max;
         int num_procs;
@@ -455,6 +461,8 @@ struct cxl_context {
         bool pending_irq;
         bool pending_fault;
         bool pending_afu_err;
+
+        struct rcu_head rcu;
 };
 
 struct cxl {
@@ -563,6 +571,9 @@ static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg
 u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
 u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
 
+ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+                                loff_t off, size_t count);
+
 
 struct cxl_calls {
         void (*cxl_slbia)(struct mm_struct *mm);
@@ -606,7 +617,7 @@ void cxl_release_psl_err_irq(struct cxl *adapter);
 int cxl_register_serr_irq(struct cxl_afu *afu);
 void cxl_release_serr_irq(struct cxl_afu *afu);
 int afu_register_irqs(struct cxl_context *ctx, u32 count);
-void afu_release_irqs(struct cxl_context *ctx);
+void afu_release_irqs(struct cxl_context *ctx, void *cookie);
 irqreturn_t cxl_slice_irq_err(int irq, void *data);
 
 int cxl_debugfs_init(void);
@@ -629,6 +640,10 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
                      struct address_space *mapping);
 void cxl_context_free(struct cxl_context *ctx);
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
+unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+                         irq_handler_t handler, void *cookie, const char *name);
+void cxl_unmap_irq(unsigned int virq, void *cookie);
+int __detach_context(struct cxl_context *ctx);
 
 /* This matches the layout of the H_COLLECT_CA_INT_INFO retbuf */
 struct cxl_irq_info {
@@ -642,6 +657,7 @@ struct cxl_irq_info {
         u64 padding[3]; /* to match the expected retbuf size for plpar_hcall9 */
 };
 
+void cxl_assign_psn_space(struct cxl_context *ctx);
 int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
                        u64 amr);
 int cxl_detach_process(struct cxl_context *ctx);
@@ -653,11 +669,23 @@ int cxl_check_error(struct cxl_afu *afu);
 int cxl_afu_slbia(struct cxl_afu *afu);
 int cxl_tlb_slb_invalidate(struct cxl *adapter);
 int cxl_afu_disable(struct cxl_afu *afu);
-int cxl_afu_reset(struct cxl_afu *afu);
+int __cxl_afu_reset(struct cxl_afu *afu);
+int cxl_afu_check_and_enable(struct cxl_afu *afu);
 int cxl_psl_purge(struct cxl_afu *afu);
 
 void cxl_stop_trace(struct cxl *cxl);
+int cxl_pci_vphb_add(struct cxl_afu *afu);
+void cxl_pci_vphb_remove(struct cxl_afu *afu);
 
 extern struct pci_driver cxl_pci_driver;
+int afu_allocate_irqs(struct cxl_context *ctx, u32 count);
+
+int afu_open(struct inode *inode, struct file *file);
+int afu_release(struct inode *inode, struct file *file);
+long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int afu_mmap(struct file *file, struct vm_area_struct *vm);
+unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
+ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
+extern const struct file_operations afu_fops;
 
 #endif
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 5286b8b704f5..25a5418c55cb 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -172,8 +172,8 @@ void cxl_handle_fault(struct work_struct *fault_work)
                 container_of(fault_work, struct cxl_context, fault_work);
         u64 dsisr = ctx->dsisr;
         u64 dar = ctx->dar;
-        struct task_struct *task;
-        struct mm_struct *mm;
+        struct task_struct *task = NULL;
+        struct mm_struct *mm = NULL;
 
         if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
             cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
@@ -194,17 +194,19 @@ void cxl_handle_fault(struct work_struct *fault_work)
         pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
 
-        if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-                pr_devel("cxl_handle_fault unable to get task %i\n",
-                         pid_nr(ctx->pid));
-                cxl_ack_ae(ctx);
-                return;
-        }
-        if (!(mm = get_task_mm(task))) {
-                pr_devel("cxl_handle_fault unable to get mm %i\n",
-                         pid_nr(ctx->pid));
-                cxl_ack_ae(ctx);
-                goto out;
+        if (!ctx->kernel) {
+                if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
+                        pr_devel("cxl_handle_fault unable to get task %i\n",
+                                 pid_nr(ctx->pid));
+                        cxl_ack_ae(ctx);
+                        return;
+                }
+                if (!(mm = get_task_mm(task))) {
+                        pr_devel("cxl_handle_fault unable to get mm %i\n",
+                                 pid_nr(ctx->pid));
+                        cxl_ack_ae(ctx);
+                        goto out;
+                }
         }
 
         if (dsisr & CXL_PSL_DSISR_An_DS)
@@ -214,9 +216,11 @@ void cxl_handle_fault(struct work_struct *fault_work)
         else
                 WARN(1, "cxl_handle_fault has nothing to handle\n");
 
-        mmput(mm);
+        if (mm)
+                mmput(mm);
 out:
-        put_task_struct(task);
+        if (task)
+                put_task_struct(task);
 }
 
 static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 2364bcadb9a9..e3f4b69527a9 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -96,7 +96,8 @@ err_put_adapter:
         put_device(&adapter->dev);
         return rc;
 }
-static int afu_open(struct inode *inode, struct file *file)
+
+int afu_open(struct inode *inode, struct file *file)
 {
         return __afu_open(inode, file, false);
 }
@@ -106,7 +107,7 @@ static int afu_master_open(struct inode *inode, struct file *file)
         return __afu_open(inode, file, true);
 }
 
-static int afu_release(struct inode *inode, struct file *file)
+int afu_release(struct inode *inode, struct file *file)
 {
         struct cxl_context *ctx = file->private_data;
 
@@ -128,7 +129,6 @@ static int afu_release(struct inode *inode, struct file *file)
          */
         cxl_context_free(ctx);
 
-        cxl_ctx_put();
         return 0;
 }
 
@@ -191,7 +191,7 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
         if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
                                      amr))) {
-                afu_release_irqs(ctx);
+                afu_release_irqs(ctx, ctx);
                 goto out;
         }
 
@@ -212,7 +212,26 @@ static long afu_ioctl_process_element(struct cxl_context *ctx,
         return 0;
 }
 
-static long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
+                                 struct cxl_afu_id __user *upafuid)
+{
+        struct cxl_afu_id afuid = { 0 };
+
+        afuid.card_id = ctx->afu->adapter->adapter_num;
+        afuid.afu_offset = ctx->afu->slice;
+        afuid.afu_mode = ctx->afu->current_mode;
+
+        /* set the flag bit in case the afu is a slave */
+        if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
+                afuid.flags |= CXL_AFUID_FLAG_SLAVE;
+
+        if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
+                return -EFAULT;
+
+        return 0;
+}
+
+long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
         struct cxl_context *ctx = file->private_data;
 
@@ -225,17 +244,20 @@ static long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                 return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
         case CXL_IOCTL_GET_PROCESS_ELEMENT:
                 return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
+        case CXL_IOCTL_GET_AFU_ID:
+                return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
+                                            arg);
         }
         return -EINVAL;
 }
 
-static long afu_compat_ioctl(struct file *file, unsigned int cmd,
+long afu_compat_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
 {
         return afu_ioctl(file, cmd, arg);
 }
 
-static int afu_mmap(struct file *file, struct vm_area_struct *vm)
+int afu_mmap(struct file *file, struct vm_area_struct *vm)
 {
         struct cxl_context *ctx = file->private_data;
 
@@ -246,7 +268,7 @@ static int afu_mmap(struct file *file, struct vm_area_struct *vm)
         return cxl_context_iomap(ctx, vm);
 }
 
-static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
 {
         struct cxl_context *ctx = file->private_data;
         int mask = 0;
@@ -278,7 +300,7 @@ static inline int ctx_event_pending(struct cxl_context *ctx)
                        ctx->pending_afu_err || (ctx->status == CLOSED));
 }
 
-static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
+ssize_t afu_read(struct file *file, char __user *buf, size_t count,
                         loff_t *off)
 {
         struct cxl_context *ctx = file->private_data;
@@ -359,7 +381,11 @@ out:
         return rc;
 }
 
-static const struct file_operations afu_fops = {
+/*
+ * Note: if this is updated, we need to update api.c to patch the new ones in
+ * too
+ */
+const struct file_operations afu_fops = {
         .owner          = THIS_MODULE,
         .open           = afu_open,
         .poll           = afu_poll,
@@ -370,7 +396,7 @@ static const struct file_operations afu_fops = {
         .mmap           = afu_mmap,
 };
 
-static const struct file_operations afu_master_fops = {
+const struct file_operations afu_master_fops = {
         .owner          = THIS_MODULE,
         .open           = afu_master_open,
         .poll           = afu_poll,
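
The new CXL_IOCTL_GET_AFU_ID ioctl above is userspace-visible. A hedged userspace sketch follows; the device path is illustrative and the struct cxl_afu_id field types are assumed from the uapi header introduced alongside this change.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/cxl.h> /* uapi header providing struct cxl_afu_id */

int main(void)
{
        struct cxl_afu_id afuid;
        int fd = open("/dev/cxl/afu0.0s", O_RDWR); /* path is an example */

        if (fd < 0)
                return 1;
        if (ioctl(fd, CXL_IOCTL_GET_AFU_ID, &afuid) == 0)
                printf("card %u afu %u mode %u%s\n",
                       afuid.card_id, afuid.afu_offset, afuid.afu_mode,
                       (afuid.flags & CXL_AFUID_FLAG_SLAVE) ? " (slave)" : "");
        close(fd);
        return 0;
}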
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index c8929c526691..680cd263436d 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -14,7 +14,7 @@
 #include <linux/slab.h>
 #include <linux/pid.h>
 #include <asm/cputable.h>
-#include <misc/cxl.h>
+#include <misc/cxl-base.h>
 
 #include "cxl.h"
 #include "trace.h"
@@ -416,9 +416,8 @@ void afu_irq_name_free(struct cxl_context *ctx)
         }
 }
 
-int afu_register_irqs(struct cxl_context *ctx, u32 count)
+int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
 {
-        irq_hw_number_t hwirq;
         int rc, r, i, j = 1;
         struct cxl_irq_name *irq_name;
 
@@ -458,6 +457,18 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
                         j++;
                 }
         }
+        return 0;
+
+out:
+        afu_irq_name_free(ctx);
+        return -ENOMEM;
+}
+
+void afu_register_hwirqs(struct cxl_context *ctx)
+{
+        irq_hw_number_t hwirq;
+        struct cxl_irq_name *irq_name;
+        int r, i;
 
         /* We've allocated all memory now, so let's do the irq allocations */
         irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
@@ -469,15 +480,21 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
                         irq_name = list_next_entry(irq_name, list);
                 }
         }
+}
 
-        return 0;
+int afu_register_irqs(struct cxl_context *ctx, u32 count)
+{
+        int rc;
 
-out:
-        afu_irq_name_free(ctx);
-        return -ENOMEM;
-}
+        rc = afu_allocate_irqs(ctx, count);
+        if (rc)
+                return rc;
+
+        afu_register_hwirqs(ctx);
+        return 0;
+}
 
-void afu_release_irqs(struct cxl_context *ctx)
+void afu_release_irqs(struct cxl_context *ctx, void *cookie)
 {
         irq_hw_number_t hwirq;
         unsigned int virq;
@@ -488,7 +505,7 @@ void afu_release_irqs(struct cxl_context *ctx)
                 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                         virq = irq_find_mapping(NULL, hwirq);
                         if (virq)
-                                cxl_unmap_irq(virq, ctx);
+                                cxl_unmap_irq(virq, cookie);
                 }
         }
 
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 8ccddceead66..4a164ab8b35a 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -20,7 +20,7 @@
 #include <linux/idr.h>
 #include <linux/pci.h>
 #include <asm/cputable.h>
-#include <misc/cxl.h>
+#include <misc/cxl-base.h>
 
 #include "cxl.h"
 #include "trace.h"
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
         spin_lock(&adapter->afu_list_lock);
         for (slice = 0; slice < adapter->slices; slice++) {
                 afu = adapter->afu[slice];
-                if (!afu->enabled)
+                if (!afu || !afu->enabled)
                         continue;
                 rcu_read_lock();
                 idr_for_each_entry(&afu->contexts_idr, ctx, id)
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 29185fc61276..10567f245818 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -15,7 +15,7 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <asm/synch.h>
-#include <misc/cxl.h>
+#include <misc/cxl-base.h>
 
 #include "cxl.h"
 #include "trace.h"
@@ -73,7 +73,7 @@ int cxl_afu_disable(struct cxl_afu *afu)
 }
 
 /* This will disable as well as reset */
-int cxl_afu_reset(struct cxl_afu *afu)
+int __cxl_afu_reset(struct cxl_afu *afu)
 {
         pr_devel("AFU reset request\n");
 
@@ -83,7 +83,7 @@ int cxl_afu_reset(struct cxl_afu *afu)
                            false);
 }
 
-static int afu_check_and_enable(struct cxl_afu *afu)
+int cxl_afu_check_and_enable(struct cxl_afu *afu)
 {
         if (afu->enabled)
                 return 0;
@@ -379,7 +379,7 @@ static int remove_process_element(struct cxl_context *ctx)
 }
 
 
-static void assign_psn_space(struct cxl_context *ctx)
+void cxl_assign_psn_space(struct cxl_context *ctx)
 {
         if (!ctx->afu->pp_size || ctx->master) {
                 ctx->psn_phys = ctx->afu->psn_phys;
@@ -430,34 +430,46 @@ err:
 #define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
 #endif
 
+static u64 calculate_sr(struct cxl_context *ctx)
+{
+        u64 sr = 0;
+
+        if (ctx->master)
+                sr |= CXL_PSL_SR_An_MP;
+        if (mfspr(SPRN_LPCR) & LPCR_TC)
+                sr |= CXL_PSL_SR_An_TC;
+        if (ctx->kernel) {
+                sr |= CXL_PSL_SR_An_R | (mfmsr() & MSR_SF);
+                sr |= CXL_PSL_SR_An_HV;
+        } else {
+                sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
+                set_endian(sr);
+                sr &= ~(CXL_PSL_SR_An_HV);
+                if (!test_tsk_thread_flag(current, TIF_32BIT))
+                        sr |= CXL_PSL_SR_An_SF;
+        }
+        return sr;
+}
+
 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
 {
-        u64 sr;
+        u32 pid;
         int r, result;
 
-        assign_psn_space(ctx);
+        cxl_assign_psn_space(ctx);
 
         ctx->elem->ctxtime = 0; /* disable */
         ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
         ctx->elem->haurp = 0; /* disable */
         ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
 
-        sr = 0;
-        if (ctx->master)
-                sr |= CXL_PSL_SR_An_MP;
-        if (mfspr(SPRN_LPCR) & LPCR_TC)
-                sr |= CXL_PSL_SR_An_TC;
-        /* HV=0, PR=1, R=1 for userspace
-         * For kernel contexts: this would need to change
-         */
-        sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
-        set_endian(sr);
-        sr &= ~(CXL_PSL_SR_An_HV);
-        if (!test_tsk_thread_flag(current, TIF_32BIT))
-                sr |= CXL_PSL_SR_An_SF;
-        ctx->elem->common.pid = cpu_to_be32(current->pid);
+        pid = current->pid;
+        if (ctx->kernel)
+                pid = 0;
         ctx->elem->common.tid = 0;
-        ctx->elem->sr = cpu_to_be64(sr);
+        ctx->elem->common.pid = cpu_to_be32(pid);
+
+        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
 
         ctx->elem->common.csrp = 0; /* disable */
         ctx->elem->common.aurp0 = 0; /* disable */
@@ -477,7 +489,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
         ctx->elem->common.wed = cpu_to_be64(wed);
 
         /* first guy needs to enable */
-        if ((result = afu_check_and_enable(ctx->afu)))
+        if ((result = cxl_afu_check_and_enable(ctx->afu)))
                 return result;
 
         add_process_element(ctx);
@@ -495,7 +507,7 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
         cxl_sysfs_afu_m_remove(afu);
         cxl_chardev_afu_remove(afu);
 
-        cxl_afu_reset(afu);
+        __cxl_afu_reset(afu);
         cxl_afu_disable(afu);
         cxl_psl_purge(afu);
 
@@ -530,20 +542,15 @@ static int activate_dedicated_process(struct cxl_afu *afu)
 static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
 {
         struct cxl_afu *afu = ctx->afu;
-        u64 sr;
+        u64 pid;
         int rc;
 
-        sr = 0;
-        set_endian(sr);
-        if (ctx->master)
-                sr |= CXL_PSL_SR_An_MP;
-        if (mfspr(SPRN_LPCR) & LPCR_TC)
-                sr |= CXL_PSL_SR_An_TC;
-        sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
-        if (!test_tsk_thread_flag(current, TIF_32BIT))
-                sr |= CXL_PSL_SR_An_SF;
-        cxl_p2n_write(afu, CXL_PSL_PID_TID_An, (u64)current->pid << 32);
-        cxl_p1n_write(afu, CXL_PSL_SR_An, sr);
+        pid = (u64)current->pid << 32;
+        if (ctx->kernel)
+                pid = 0;
+        cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);
+
+        cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));
 
         if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
                 return rc;
@@ -564,9 +571,9 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
         cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
 
         /* master only context for dedicated */
-        assign_psn_space(ctx);
+        cxl_assign_psn_space(ctx);
 
-        if ((rc = cxl_afu_reset(afu)))
+        if ((rc = __cxl_afu_reset(afu)))
                 return rc;
 
         cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
@@ -629,7 +636,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
 
 static inline int detach_process_native_dedicated(struct cxl_context *ctx)
 {
-        cxl_afu_reset(ctx->afu);
+        __cxl_afu_reset(ctx->afu);
         cxl_afu_disable(ctx->afu);
         cxl_psl_purge(ctx->afu);
         return 0;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 1ef01647265f..32ad09705949 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -90,6 +90,7 @@
 /* This works a little different than the p1/p2 register accesses to make it
  * easier to pull out individual fields */
 #define AFUD_READ(afu, off)        in_be64(afu->afu_desc_mmio + off)
+#define AFUD_READ_LE(afu, off)     in_le64(afu->afu_desc_mmio + off)
 #define EXTRACT_PPC_BIT(val, bit)  (!!(val & PPC_BIT(bit)))
 #define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
 
@@ -204,7 +205,7 @@ static void dump_cxl_config_space(struct pci_dev *dev)
         dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
                 p1_base(dev), p1_size(dev));
         dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
-                p1_base(dev), p2_size(dev));
+                p2_base(dev), p2_size(dev));
         dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
                 pci_resource_start(dev, 4), pci_resource_len(dev, 4));
 
@@ -286,7 +287,8 @@ static void dump_cxl_config_space(struct pci_dev *dev)
 
 static void dump_afu_descriptor(struct cxl_afu *afu)
 {
-        u64 val;
+        u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
+        int i;
 
 #define show_reg(name, what) \
         dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)
@@ -296,6 +298,7 @@ static void dump_afu_descriptor(struct cxl_afu *afu)
         show_reg("num_of_processes", AFUD_NUM_PROCS(val));
         show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
         show_reg("req_prog_mode", val & 0xffffULL);
+        afu_cr_num = AFUD_NUM_CRS(val);
 
         val = AFUD_READ(afu, 0x8);
         show_reg("Reserved", val);
@@ -307,8 +310,10 @@ static void dump_afu_descriptor(struct cxl_afu *afu)
         val = AFUD_READ_CR(afu);
         show_reg("Reserved", (val >> (63-7)) & 0xff);
         show_reg("AFU_CR_len", AFUD_CR_LEN(val));
+        afu_cr_len = AFUD_CR_LEN(val) * 256;
 
         val = AFUD_READ_CR_OFF(afu);
+        afu_cr_off = val;
         show_reg("AFU_CR_offset", val);
 
         val = AFUD_READ_PPPSA(afu);
@@ -325,6 +330,11 @@ static void dump_afu_descriptor(struct cxl_afu *afu)
         val = AFUD_READ_EB_OFF(afu);
         show_reg("AFU_EB_offset", val);
 
+        for (i = 0; i < afu_cr_num; i++) {
+                val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
+                show_reg("CR Vendor", val & 0xffff);
+                show_reg("CR Device", (val >> 16) & 0xffff);
+        }
 #undef show_reg
 }
 
@@ -529,7 +539,7 @@ err:
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-        if (afu->p1n_mmio)
+        if (afu->p2n_mmio)
                 iounmap(afu->p2n_mmio);
         if (afu->p1n_mmio)
                 iounmap(afu->p1n_mmio);
@@ -593,6 +603,22 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
         afu->crs_len = AFUD_CR_LEN(val) * 256;
         afu->crs_offset = AFUD_READ_CR_OFF(afu);
 
+
+        /* eb_len is in multiples of 4K */
+        afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
+        afu->eb_offset = AFUD_READ_EB_OFF(afu);
+
+        /* eb_off is 4K aligned so lower 12 bits are always zero */
+        if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
+                dev_warn(&afu->dev,
+                         "Invalid AFU error buffer offset %Lx\n",
+                         afu->eb_offset);
+                dev_info(&afu->dev,
+                         "Ignoring AFU error buffer in the descriptor\n");
+                /* indicate that no afu buffer exists */
+                afu->eb_len = 0;
+        }
+
         return 0;
 }
 
@@ -631,7 +657,7 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
         reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
         if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
                 dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
-                if (cxl_afu_reset(afu))
+                if (__cxl_afu_reset(afu))
                         return -EIO;
                 if (cxl_afu_disable(afu))
                         return -EIO;
@@ -672,6 +698,50 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
         return 0;
 }
 
+#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
+/*
+ * afu_eb_read:
+ * Called from sysfs and reads the afu error info buffer. The h/w only supports
+ * 4/8 byte aligned access. So in case the requested offset/count aren't 8 byte
+ * aligned the function uses a bounce buffer which can be max PAGE_SIZE.
+ */
+ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+                                loff_t off, size_t count)
+{
+        loff_t aligned_start, aligned_end;
+        size_t aligned_length;
+        void *tbuf;
+        const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;
+
+        if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
+                return 0;
+
+        /* calculate aligned read window */
+        count = min((size_t)(afu->eb_len - off), count);
+        aligned_start = round_down(off, 8);
+        aligned_end = round_up(off + count, 8);
+        aligned_length = aligned_end - aligned_start;
+
+        /* max we can copy in one read is PAGE_SIZE */
+        if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
+                aligned_length = ERR_BUFF_MAX_COPY_SIZE;
+                count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
+        }
+
+        /* use bounce buffer for copy */
+        tbuf = (void *)__get_free_page(GFP_TEMPORARY);
+        if (!tbuf)
+                return -ENOMEM;
+
+        /* perform aligned read from the mmio region */
+        memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
+        memcpy(buf, tbuf + (off & 0x7), count);
+
+        free_page((unsigned long)tbuf);
+
+        return count;
+}
+
 static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
 {
         struct cxl_afu *afu;
@@ -691,7 +761,7 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
                 goto err2;
 
         /* We need to reset the AFU before we can read the AFU descriptor */
-        if ((rc = cxl_afu_reset(afu)))
+        if ((rc = __cxl_afu_reset(afu)))
                 goto err2;
 
         if (cxl_verbose)
@@ -731,6 +801,9 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
 
         adapter->afu[afu->slice] = afu;
 
+        if ((rc = cxl_pci_vphb_add(afu)))
+                dev_info(&afu->dev, "Can't register vPHB\n");
+
         return 0;
 
 err_put2:
@@ -783,8 +856,10 @@ int cxl_reset(struct cxl *adapter)
 
         dev_info(&dev->dev, "CXL reset\n");
 
-        for (i = 0; i < adapter->slices; i++)
+        for (i = 0; i < adapter->slices; i++) {
+                cxl_pci_vphb_remove(adapter->afu[i]);
                 cxl_remove_afu(adapter->afu[i]);
+        }
 
         /* pcie_warm_reset requests a fundamental pci reset which includes a
          * PERST assert/deassert. PERST triggers a loading of the image
@@ -857,13 +932,13 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
         u8 image_state;
 
         if (!(vsec = find_cxl_vsec(dev))) {
-                dev_err(&adapter->dev, "ABORTING: CXL VSEC not found!\n");
+                dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
                 return -ENODEV;
         }
 
         CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
         if (vseclen < CXL_VSEC_MIN_SIZE) {
-                pr_err("ABORTING: CXL VSEC too short\n");
+                dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
                 return -EINVAL;
         }
 
@@ -902,24 +977,24 @@ static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
                 return -EBUSY;
 
         if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
-                dev_err(&adapter->dev, "ABORTING: CXL requires unsupported features\n");
+                dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
                 return -EINVAL;
         }
 
         if (!adapter->slices) {
                 /* Once we support dynamic reprogramming we can use the card if
                  * it supports loadable AFUs */
-                dev_err(&adapter->dev, "ABORTING: Device has no AFUs\n");
+                dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
                 return -EINVAL;
         }
 
         if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
-                dev_err(&adapter->dev, "ABORTING: VSEC shows no AFU descriptors\n");
+                dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
                 return -EINVAL;
         }
 
         if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
-                dev_err(&adapter->dev, "ABORTING: Problem state size larger than "
+                dev_err(&dev->dev, "ABORTING: Problem state size larger than "
                         "available in BAR2: 0x%llx > 0x%llx\n",
                          adapter->ps_size, p2_size(dev) - adapter->ps_off);
                 return -EINVAL;
@@ -968,6 +1043,15 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
         if (!(adapter = cxl_alloc_adapter(dev)))
                 return ERR_PTR(-ENOMEM);
 
+        if ((rc = cxl_read_vsec(adapter, dev)))
+                goto err1;
+
+        if ((rc = cxl_vsec_looks_ok(adapter, dev)))
+                goto err1;
+
+        if ((rc = setup_cxl_bars(dev)))
+                goto err1;
+
         if ((rc = switch_card_to_cxl(dev)))
                 goto err1;
 
@@ -977,12 +1061,6 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
         if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
                 goto err2;
 
-        if ((rc = cxl_read_vsec(adapter, dev)))
-                goto err2;
-
-        if ((rc = cxl_vsec_looks_ok(adapter, dev)))
-                goto err2;
-
         if ((rc = cxl_update_image_control(adapter)))
                 goto err2;
 
@@ -1067,9 +1145,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
         if (cxl_verbose)
                 dump_cxl_config_space(dev);
 
-        if ((rc = setup_cxl_bars(dev)))
-                return rc;
-
         if ((rc = pci_enable_device(dev))) {
                 dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
                 return rc;
@@ -1078,6 +1153,7 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
         adapter = cxl_init_adapter(dev);
         if (IS_ERR(adapter)) {
                 dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
+                pci_disable_device(dev);
                 return PTR_ERR(adapter);
         }
 
@@ -1092,16 +1168,18 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
 static void cxl_remove(struct pci_dev *dev)
 {
         struct cxl *adapter = pci_get_drvdata(dev);
-        int afu;
-
-        dev_warn(&dev->dev, "pci remove\n");
+        struct cxl_afu *afu;
+        int i;
 
         /*
          * Lock to prevent someone grabbing a ref through the adapter list as
          * we are removing it
          */
-        for (afu = 0; afu < adapter->slices; afu++)
-                cxl_remove_afu(adapter->afu[afu]);
+        for (i = 0; i < adapter->slices; i++) {
+                afu = adapter->afu[i];
+                cxl_pci_vphb_remove(afu);
+                cxl_remove_afu(afu);
+        }
         cxl_remove_adapter(adapter);
 }
 
@@ -1110,4 +1188,5 @@ struct pci_driver cxl_pci_driver = {
         .id_table = cxl_pci_tbl,
         .probe = cxl_probe,
         .remove = cxl_remove,
+        .shutdown = cxl_remove,
 };
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index d0c38c7bc0c4..31f38bc71a3d 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -185,7 +185,7 @@ static ssize_t reset_store_afu(struct device *device,
                 goto err;
         }
 
-        if ((rc = cxl_afu_reset(afu)))
+        if ((rc = __cxl_afu_reset(afu)))
                 goto err;
 
         rc = count;
@@ -356,6 +356,16 @@ static ssize_t api_version_compatible_show(struct device *device,
         return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
 }
 
+static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
+                           struct bin_attribute *bin_attr, char *buf,
+                           loff_t off, size_t count)
+{
+        struct cxl_afu *afu = to_cxl_afu(container_of(kobj,
+                                                      struct device, kobj));
+
+        return cxl_afu_read_err_buffer(afu, buf, off, count);
+}
+
 static struct device_attribute afu_attrs[] = {
         __ATTR_RO(mmio_size),
         __ATTR_RO(irqs_min),
@@ -534,6 +544,10 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu)
         struct afu_config_record *cr, *tmp;
         int i;
 
+        /* remove the err buffer bin attribute */
+        if (afu->eb_len)
+                device_remove_bin_file(&afu->dev, &afu->attr_eb);
+
         for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
                 device_remove_file(&afu->dev, &afu_attrs[i]);
 
@@ -555,6 +569,22 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
                 goto err;
         }
 
+        /* conditionally create the binary file for the error info buffer */
+        if (afu->eb_len) {
+                afu->attr_eb.attr.name = "afu_err_buff";
+                afu->attr_eb.attr.mode = S_IRUGO;
+                afu->attr_eb.size = afu->eb_len;
+                afu->attr_eb.read = afu_eb_read;
+
+                rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
+                if (rc) {
+                        dev_err(&afu->dev,
+                                "Unable to create eb attr for the afu. Err(%d)\n",
+                                rc);
+                        goto err;
+                }
+        }
+
         for (i = 0; i < afu->crs_num; i++) {
                 cr = cxl_sysfs_afu_new_cr(afu, i);
                 if (IS_ERR(cr)) {
@@ -570,6 +600,9 @@ err1:
570 cxl_sysfs_afu_remove(afu); 600 cxl_sysfs_afu_remove(afu);
571 return rc; 601 return rc;
572err: 602err:
603 /* reset the eb_len as we haven't created the bin attr */
604 afu->eb_len = 0;
605
573 for (i--; i >= 0; i--) 606 for (i--; i >= 0; i--)
574 device_remove_file(&afu->dev, &afu_attrs[i]); 607 device_remove_file(&afu->dev, &afu_attrs[i]);
575 return rc; 608 return rc;
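The afu_err_buff attribute added above exposes the AFU's error information
buffer to userspace as a read-only binary sysfs file, with
cxl_afu_read_err_buffer() clamping the offset and length on the kernel side.
A minimal userspace sketch of reading it follows; the sysfs path is an
assumption for illustration and depends on the card and AFU numbering:

/* Sketch only: the path below is assumed, not guaranteed by this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/cxl/afu0.0/afu_err_buff", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = pread(fd, buf, sizeof(buf), 0);	/* partial reads are fine */
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}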
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
new file mode 100644
index 000000000000..2eba002b580b
--- /dev/null
+++ b/drivers/misc/cxl/vphb.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright 2014 IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/pci.h>
11#include <misc/cxl.h>
12#include "cxl.h"
13
14static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
15{
16 if (dma_mask < DMA_BIT_MASK(64)) {
17 pr_info("%s: only 64-bit DMA supported on CXL\n", __func__);
18 return -EIO;
19 }
20
21 *(pdev->dev.dma_mask) = dma_mask;
22 return 0;
23}
24
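/* Illustrative sketch, assumed caller (not taken from this file):
 * cxl_dma_set_mask() above rejects anything narrower than the full
 * 64-bit mask, so a driver on the vPHB is expected to request:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		dev_err(&pdev->dev, "unable to set 64-bit DMA mask\n");
 */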
25static int cxl_pci_probe_mode(struct pci_bus *bus)
26{
27 return PCI_PROBE_NORMAL;
28}
29
30static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
31{
32 return -ENODEV;
33}
34
35static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
36{
37 /*
38 * MSI should never be set, but we still need to provide this
39 * callback.
40 */
41}
42
43static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
44{
45 struct pci_controller *phb;
46 struct cxl_afu *afu;
47 struct cxl_context *ctx;
48
49 phb = pci_bus_to_host(dev->bus);
50 afu = (struct cxl_afu *)phb->private_data;
51 set_dma_ops(&dev->dev, &dma_direct_ops);
52 set_dma_offset(&dev->dev, PAGE_OFFSET);
53
54 /*
55 * Allocate a default context to do cxl things with. If we eventually
56 * do real DMA ops, we'll need a default context to attach them to.
57 */
58 ctx = cxl_dev_context_init(dev);
59 if (IS_ERR(ctx))
60 return false;
61 dev->dev.archdata.cxl_ctx = ctx;
62
63 return (cxl_afu_check_and_enable(afu) == 0);
64}
65
66static void cxl_pci_disable_device(struct pci_dev *dev)
67{
68 struct cxl_context *ctx = cxl_get_context(dev);
69
70 if (ctx) {
71 if (ctx->status == STARTED) {
72 dev_err(&dev->dev, "Default context started\n");
73 return;
74 }
75 dev->dev.archdata.cxl_ctx = NULL;
76 cxl_release_context(ctx);
77 }
78}
79
80static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
81 unsigned long type)
82{
83 return 1;
84}
85
86static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
87{
88 /* Should we do an AFU reset here? */
89}
90
91static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
92{
93 return (bus << 8) + devfn;
94}
95
96static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
97 u8 bus, u8 devfn, int offset)
98{
99 int record = cxl_pcie_cfg_record(bus, devfn);
100
101 return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
102}
103
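/* Worked example for cxl_pcie_cfg_addr(), values assumed for
 * illustration. cxl_pci_vphb_add() below stores the record length
 * (crs_len) in phb->cfg_data, so with crs_len = 0x1000, bus = 0,
 * devfn = 2, offset = 0x10:
 *
 *	record = (0 << 8) + 2 = 2
 *	addr   = cfg_addr + 0x1000 * 2 + 0x10
 *
 * i.e. each AFU configuration record is presented, back to back from
 * cfg_addr, as the config space of one virtual function.
 */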
104
105static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
106 int offset, int len,
107 volatile void __iomem **ioaddr,
108 u32 *mask, int *shift)
109{
110 struct pci_controller *phb;
111 struct cxl_afu *afu;
112 unsigned long addr;
113
114 phb = pci_bus_to_host(bus);
115 if (phb == NULL)
116 return PCIBIOS_DEVICE_NOT_FOUND;
117 afu = (struct cxl_afu *)phb->private_data;
118
119 if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
120 return PCIBIOS_DEVICE_NOT_FOUND;
121 if (offset >= (unsigned long)phb->cfg_data)
122 return PCIBIOS_BAD_REGISTER_NUMBER;
123 addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);
124
125 *ioaddr = (void __iomem *)(addr & ~0x3ULL);
126 *shift = ((addr & 0x3) * 8);
127 switch (len) {
128 case 1:
129 *mask = 0xff;
130 break;
131 case 2:
132 *mask = 0xffff;
133 break;
134 default:
135 *mask = 0xffffffff;
136 break;
137 }
138 return 0;
139}
140
141static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
142 int offset, int len, u32 *val)
143{
144 volatile void __iomem *ioaddr;
145 int shift, rc;
146 u32 mask;
147
148 rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
149 &mask, &shift);
150 if (rc)
151 return rc;
152
153 /* Can only read 32 bits */
154 *val = (in_le32(ioaddr) >> shift) & mask;
155 return PCIBIOS_SUCCESSFUL;
156}
157
158static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
159 int offset, int len, u32 val)
160{
161 volatile void __iomem *ioaddr;
162 u32 v, mask;
163 int shift, rc;
164
165 rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
166 &mask, &shift);
167 if (rc)
168 return rc;
169
170 /* Can only write 32 bits so do read-modify-write */
171 mask <<= shift;
172 val <<= shift;
173
174 v = (in_le32(ioaddr) & ~mask) | (val & mask);
175
176 out_le32(ioaddr, v);
177 return PCIBIOS_SUCCESSFUL;
178}
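/* Worked example for the mask/shift plumbing, values assumed: a 2-byte
 * write of 0xbeef at an address with (addr & 0x3) == 2 gives
 *
 *	shift = 2 * 8        = 16
 *	mask  = 0xffff << 16 = 0xffff0000
 *	val   = 0xbeef << 16 = 0xbeef0000
 *	v     = (old & 0x0000ffff) | 0xbeef0000
 *
 * The bitwise OR in the read-modify-write above is what preserves the
 * untouched low half of the 32-bit word before out_le32() stores it.
 */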
179
180static struct pci_ops cxl_pcie_pci_ops =
181{
182 .read = cxl_pcie_read_config,
183 .write = cxl_pcie_write_config,
184};
185
186
187static struct pci_controller_ops cxl_pci_controller_ops =
188{
189 .probe_mode = cxl_pci_probe_mode,
190 .enable_device_hook = cxl_pci_enable_device_hook,
191 .disable_device = cxl_pci_disable_device,
192 .release_device = cxl_pci_disable_device,
193 .window_alignment = cxl_pci_window_alignment,
194 .reset_secondary_bus = cxl_pci_reset_secondary_bus,
195 .setup_msi_irqs = cxl_setup_msi_irqs,
196 .teardown_msi_irqs = cxl_teardown_msi_irqs,
197 .dma_set_mask = cxl_dma_set_mask,
198};
199
200int cxl_pci_vphb_add(struct cxl_afu *afu)
201{
202 struct pci_dev *phys_dev;
203 struct pci_controller *phb, *phys_phb;
204
205 phys_dev = to_pci_dev(afu->adapter->dev.parent);
206 phys_phb = pci_bus_to_host(phys_dev->bus);
207
208 /* Alloc and setup PHB data structure */
209 phb = pcibios_alloc_controller(phys_phb->dn);
210
211 if (!phb)
212 return -ENODEV;
213
214 /* Setup parent in sysfs */
215 phb->parent = &phys_dev->dev;
216
217 /* Setup the PHB using arch provided callback */
218 phb->ops = &cxl_pcie_pci_ops;
219 phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
220 phb->cfg_data = (void *)(u64)afu->crs_len;
221 phb->private_data = afu;
222 phb->controller_ops = cxl_pci_controller_ops;
223
224 /* Scan the bus */
225 pcibios_scan_phb(phb);
226 if (phb->bus == NULL)
227 return -ENXIO;
228
229 /* Claim resources. This might need some rework as well depending
230 * on whether we are doing probe-only or not, like assigning
231 * unassigned resources etc.
232 */
233 pcibios_claim_one_bus(phb->bus);
234
235 /* Add probed PCI devices to the device model */
236 pci_bus_add_devices(phb->bus);
237
238 afu->phb = phb;
239
240 return 0;
241}
242
243
244void cxl_pci_vphb_remove(struct cxl_afu *afu)
245{
246 struct pci_controller *phb;
247
248 /* If there is no configuration record we won't have one of these */
249 if (!afu || !afu->phb)
250 return;
251
252 phb = afu->phb;
253
254 pci_remove_root_bus(phb->bus);
255}
256
257struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
258{
259 struct pci_controller *phb;
260
261 phb = pci_bus_to_host(dev->bus);
262
263 return (struct cxl_afu *)phb->private_data;
264}
265EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
266
267unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
268{
269 return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
270}
271EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);
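Together with the kernel API in api.c, the vPHB lets an ordinary PCI driver
bind to a device that is really an AFU configuration record. Below is a
minimal sketch of a probe routine using the two helpers exported above; the
driver name and surrounding boilerplate are assumptions for illustration:

/* Sketch of an AFU driver probe using the exported vPHB helpers. */
#include <linux/pci.h>
#include <misc/cxl.h>

static int example_afu_probe(struct pci_dev *dev,
			     const struct pci_device_id *id)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);
	unsigned int record = cxl_pci_to_cfg_record(dev);

	dev_info(&dev->dev, "bound to AFU %p, config record %u\n",
		 afu, record);

	/* From here the driver can allocate and start a context with
	 * cxl_dev_context_init(dev), as introduced in api.c. */
	return 0;
}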