aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristophe Lombard <clombard@linux.vnet.ibm.com>2016-03-04 06:26:36 -0500
committerMichael Ellerman <mpe@ellerman.id.au>2016-03-09 07:36:52 -0500
commit14baf4d9c739e6e69150512d2eb23c71fffcc192 (patch)
tree0ed05f5ee469c2933b49bbd53b6777b67f975fa2
parentcbffa3a5146a90f46806cef3a98b8be5833727e8 (diff)
cxl: Add guest-specific code
The new of.c file contains code to parse the device tree to find out about cxl adapters and AFUs. guest.c implements the guest-specific callbacks for the backend API. The process element ID is not known until the context is attached, so we have to separate the context ID assigned by the cxl driver from the process element ID visible to the user applications. In bare-metal, the 2 IDs match. Co-authored-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com> Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com> Signed-off-by: Christophe Lombard <clombard@linux.vnet.ibm.com> Reviewed-by: Manoj Kumar <manoj@linux.vnet.ibm.com> Acked-by: Ian Munsie <imunsie@au1.ibm.com> [mpe: Fix SMP=n build, fix PSERIES=n build, minor whitespace fixes] Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--drivers/misc/cxl/Makefile1
-rw-r--r--drivers/misc/cxl/api.c2
-rw-r--r--drivers/misc/cxl/context.c6
-rw-r--r--drivers/misc/cxl/cxl.h37
-rw-r--r--drivers/misc/cxl/file.c2
-rw-r--r--drivers/misc/cxl/guest.c950
-rw-r--r--drivers/misc/cxl/hcalls.c1
-rw-r--r--drivers/misc/cxl/hcalls.h1
-rw-r--r--drivers/misc/cxl/main.c23
-rw-r--r--drivers/misc/cxl/of.c513
10 files changed, 1525 insertions, 11 deletions
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index be2ac5ce349f..a2f49cf4a168 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
4cxl-y += main.o file.o irq.o fault.o native.o 4cxl-y += main.o file.o irq.o fault.o native.o
5cxl-y += context.o sysfs.o debugfs.o pci.o trace.o 5cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
6cxl-y += vphb.o api.o 6cxl-y += vphb.o api.o
7cxl-$(CONFIG_PPC_PSERIES) += guest.o of.o hcalls.o
7obj-$(CONFIG_CXL) += cxl.o 8obj-$(CONFIG_CXL) += cxl.o
8obj-$(CONFIG_CXL_BASE) += base.o 9obj-$(CONFIG_CXL_BASE) += base.o
9 10
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 31eb842ee6c0..325f9578a556 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(cxl_start_context);
191 191
192int cxl_process_element(struct cxl_context *ctx) 192int cxl_process_element(struct cxl_context *ctx)
193{ 193{
194 return ctx->pe; 194 return ctx->external_pe;
195} 195}
196EXPORT_SYMBOL_GPL(cxl_process_element); 196EXPORT_SYMBOL_GPL(cxl_process_element);
197 197
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 200837f7612b..180c85a32825 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -95,8 +95,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
95 return i; 95 return i;
96 96
97 ctx->pe = i; 97 ctx->pe = i;
98 if (cpu_has_feature(CPU_FTR_HVMODE)) 98 if (cpu_has_feature(CPU_FTR_HVMODE)) {
99 ctx->elem = &ctx->afu->native->spa[i]; 99 ctx->elem = &ctx->afu->native->spa[i];
100 ctx->external_pe = ctx->pe;
101 } else {
102 ctx->external_pe = -1; /* assigned when attaching */
103 }
100 ctx->pe_inserted = false; 104 ctx->pe_inserted = false;
101 105
102 /* 106 /*
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 3a1fabd41072..4372a87ff3aa 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -433,6 +433,12 @@ struct cxl_irq_name {
433 char *name; 433 char *name;
434}; 434};
435 435
436struct irq_avail {
437 irq_hw_number_t offset;
438 irq_hw_number_t range;
439 unsigned long *bitmap;
440};
441
436/* 442/*
437 * This is a cxl context. If the PSL is in dedicated mode, there will be one 443 * This is a cxl context. If the PSL is in dedicated mode, there will be one
438 * of these per AFU. If in AFU directed there can be lots of these. 444 * of these per AFU. If in AFU directed there can be lots of these.
@@ -488,7 +494,19 @@ struct cxl_context {
488 494
489 struct cxl_process_element *elem; 495 struct cxl_process_element *elem;
490 496
491 int pe; /* process element handle */ 497 /*
498 * pe is the process element handle, assigned by this driver when the
499 * context is initialized.
500 *
501 * external_pe is the PE shown outside of cxl.
502 * On bare-metal, pe=external_pe, because we decide what the handle is.
503 * In a guest, we only find out about the pe used by pHyp when the
504 * context is attached, and that's the value we want to report outside
505 * of cxl.
506 */
507 int pe;
508 int external_pe;
509
492 u32 irq_count; 510 u32 irq_count;
493 bool pe_inserted; 511 bool pe_inserted;
494 bool master; 512 bool master;
@@ -782,6 +800,7 @@ void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
782void cxl_pci_vphb_remove(struct cxl_afu *afu); 800void cxl_pci_vphb_remove(struct cxl_afu *afu);
783 801
784extern struct pci_driver cxl_pci_driver; 802extern struct pci_driver cxl_pci_driver;
803extern struct platform_driver cxl_of_driver;
785int afu_allocate_irqs(struct cxl_context *ctx, u32 count); 804int afu_allocate_irqs(struct cxl_context *ctx, u32 count);
786 805
787int afu_open(struct inode *inode, struct file *file); 806int afu_open(struct inode *inode, struct file *file);
@@ -792,6 +811,21 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
792ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off); 811ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
793extern const struct file_operations afu_fops; 812extern const struct file_operations afu_fops;
794 813
814struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev);
815void cxl_guest_remove_adapter(struct cxl *adapter);
816int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np);
817int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np);
818ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
819ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len);
820int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np);
821void cxl_guest_remove_afu(struct cxl_afu *afu);
822int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np);
823int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np);
824int cxl_guest_add_chardev(struct cxl *adapter);
825void cxl_guest_remove_chardev(struct cxl *adapter);
826void cxl_guest_reload_module(struct cxl *adapter);
827int cxl_of_probe(struct platform_device *pdev);
828
795struct cxl_backend_ops { 829struct cxl_backend_ops {
796 struct module *module; 830 struct module *module;
797 int (*adapter_reset)(struct cxl *adapter); 831 int (*adapter_reset)(struct cxl *adapter);
@@ -824,6 +858,7 @@ struct cxl_backend_ops {
824 int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val); 858 int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val);
825}; 859};
826extern const struct cxl_backend_ops cxl_native_ops; 860extern const struct cxl_backend_ops cxl_native_ops;
861extern const struct cxl_backend_ops cxl_guest_ops;
827extern const struct cxl_backend_ops *cxl_ops; 862extern const struct cxl_backend_ops *cxl_ops;
828 863
829#endif 864#endif
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index b8ce29bc52d5..df4d49a6c67a 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -228,7 +228,7 @@ static long afu_ioctl_process_element(struct cxl_context *ctx,
228{ 228{
229 pr_devel("%s: pe: %i\n", __func__, ctx->pe); 229 pr_devel("%s: pe: %i\n", __func__, ctx->pe);
230 230
231 if (copy_to_user(upe, &ctx->pe, sizeof(__u32))) 231 if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
232 return -EFAULT; 232 return -EFAULT;
233 233
234 return 0; 234 return 0;
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
new file mode 100644
index 000000000000..d7d53d869283
--- /dev/null
+++ b/drivers/misc/cxl/guest.c
@@ -0,0 +1,950 @@
1/*
2 * Copyright 2015 IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/spinlock.h>
11#include <linux/uaccess.h>
12#include <linux/delay.h>
13
14#include "cxl.h"
15#include "hcalls.h"
16#include "trace.h"
17
18
19static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
20 u64 errstat)
21{
22 pr_devel("in %s\n", __func__);
23 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
24
25 return cxl_ops->ack_irq(ctx, 0, errstat);
26}
27
28static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
29 void *buf, size_t len)
30{
31 unsigned int entries, mod;
32 unsigned long **vpd_buf = NULL;
33 struct sg_list *le;
34 int rc = 0, i, tocopy;
35 u64 out = 0;
36
37 if (buf == NULL)
38 return -EINVAL;
39
40 /* number of entries in the list */
41 entries = len / SG_BUFFER_SIZE;
42 mod = len % SG_BUFFER_SIZE;
43 if (mod)
44 entries++;
45
46 if (entries > SG_MAX_ENTRIES) {
47 entries = SG_MAX_ENTRIES;
48 len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
49 mod = 0;
50 }
51
52 vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
53 if (!vpd_buf)
54 return -ENOMEM;
55
56 le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
57 if (!le) {
58 rc = -ENOMEM;
59 goto err1;
60 }
61
62 for (i = 0; i < entries; i++) {
63 vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
64 if (!vpd_buf[i]) {
65 rc = -ENOMEM;
66 goto err2;
67 }
68 le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
69 le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
70 if ((i == (entries - 1)) && mod)
71 le[i].len = cpu_to_be64(mod);
72 }
73
74 if (adapter)
75 rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
76 virt_to_phys(le), entries, &out);
77 else
78 rc = cxl_h_collect_vpd(afu->guest->handle, 0,
79 virt_to_phys(le), entries, &out);
80 pr_devel("length of available (entries: %i), vpd: %#llx\n",
81 entries, out);
82
83 if (!rc) {
84 /*
85 * hcall returns in 'out' the size of available VPDs.
86 * It fills the buffer with as much data as possible.
87 */
88 if (out < len)
89 len = out;
90 rc = len;
91 if (out) {
92 for (i = 0; i < entries; i++) {
93 if (len < SG_BUFFER_SIZE)
94 tocopy = len;
95 else
96 tocopy = SG_BUFFER_SIZE;
97 memcpy(buf, vpd_buf[i], tocopy);
98 buf += tocopy;
99 len -= tocopy;
100 }
101 }
102 }
103err2:
104 for (i = 0; i < entries; i++) {
105 if (vpd_buf[i])
106 free_page((unsigned long) vpd_buf[i]);
107 }
108 free_page((unsigned long) le);
109err1:
110 kfree(vpd_buf);
111 return rc;
112}
113
114static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
115{
116 return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
117}
118
119static irqreturn_t guest_psl_irq(int irq, void *data)
120{
121 struct cxl_context *ctx = data;
122 struct cxl_irq_info irq_info;
123 int rc;
124
125 pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
126 rc = guest_get_irq_info(ctx, &irq_info);
127 if (rc) {
128 WARN(1, "Unable to get IRQ info: %i\n", rc);
129 return IRQ_HANDLED;
130 }
131
132 rc = cxl_irq(irq, ctx, &irq_info);
133 return rc;
134}
135
136static irqreturn_t guest_slice_irq_err(int irq, void *data)
137{
138 struct cxl_afu *afu = data;
139 int rc;
140 u64 serr;
141
142 WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
143 rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
144 if (rc) {
145 dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
146 return IRQ_HANDLED;
147 }
148 dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
149
150 rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
151 if (rc)
152 dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
153 rc);
154
155 return IRQ_HANDLED;
156}
157
158
159static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
160{
161 int i, n;
162 struct irq_avail *cur;
163
164 for (i = 0; i < adapter->guest->irq_nranges; i++) {
165 cur = &adapter->guest->irq_avail[i];
166 n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
167 0, len, 0);
168 if (n < cur->range) {
169 bitmap_set(cur->bitmap, n, len);
170 *irq = cur->offset + n;
171 pr_devel("guest: allocate IRQs %#x->%#x\n",
172 *irq, *irq + len - 1);
173
174 return 0;
175 }
176 }
177 return -ENOSPC;
178}
179
180static int irq_free_range(struct cxl *adapter, int irq, int len)
181{
182 int i, n;
183 struct irq_avail *cur;
184
185 if (len == 0)
186 return -ENOENT;
187
188 for (i = 0; i < adapter->guest->irq_nranges; i++) {
189 cur = &adapter->guest->irq_avail[i];
190 if (irq >= cur->offset &&
191 (irq + len) <= (cur->offset + cur->range)) {
192 n = irq - cur->offset;
193 bitmap_clear(cur->bitmap, n, len);
194 pr_devel("guest: release IRQs %#x->%#x\n",
195 irq, irq + len - 1);
196 return 0;
197 }
198 }
199 return -ENOENT;
200}
201
202static int guest_reset(struct cxl *adapter)
203{
204 int rc;
205
206 pr_devel("Adapter reset request\n");
207 rc = cxl_h_reset_adapter(adapter->guest->handle);
208 return rc;
209}
210
211static int guest_alloc_one_irq(struct cxl *adapter)
212{
213 int irq;
214
215 spin_lock(&adapter->guest->irq_alloc_lock);
216 if (irq_alloc_range(adapter, 1, &irq))
217 irq = -ENOSPC;
218 spin_unlock(&adapter->guest->irq_alloc_lock);
219 return irq;
220}
221
222static void guest_release_one_irq(struct cxl *adapter, int irq)
223{
224 spin_lock(&adapter->guest->irq_alloc_lock);
225 irq_free_range(adapter, irq, 1);
226 spin_unlock(&adapter->guest->irq_alloc_lock);
227}
228
229static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
230 struct cxl *adapter, unsigned int num)
231{
232 int i, try, irq;
233
234 memset(irqs, 0, sizeof(struct cxl_irq_ranges));
235
236 spin_lock(&adapter->guest->irq_alloc_lock);
237 for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
238 try = num;
239 while (try) {
240 if (irq_alloc_range(adapter, try, &irq) == 0)
241 break;
242 try /= 2;
243 }
244 if (!try)
245 goto error;
246 irqs->offset[i] = irq;
247 irqs->range[i] = try;
248 num -= try;
249 }
250 if (num)
251 goto error;
252 spin_unlock(&adapter->guest->irq_alloc_lock);
253 return 0;
254
255error:
256 for (i = 0; i < CXL_IRQ_RANGES; i++)
257 irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
258 spin_unlock(&adapter->guest->irq_alloc_lock);
259 return -ENOSPC;
260}
261
262static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
263 struct cxl *adapter)
264{
265 int i;
266
267 spin_lock(&adapter->guest->irq_alloc_lock);
268 for (i = 0; i < CXL_IRQ_RANGES; i++)
269 irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
270 spin_unlock(&adapter->guest->irq_alloc_lock);
271}
272
273static int guest_register_serr_irq(struct cxl_afu *afu)
274{
275 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
276 dev_name(&afu->dev));
277 if (!afu->err_irq_name)
278 return -ENOMEM;
279
280 if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
281 guest_slice_irq_err, afu, afu->err_irq_name))) {
282 kfree(afu->err_irq_name);
283 afu->err_irq_name = NULL;
284 return -ENOMEM;
285 }
286
287 return 0;
288}
289
290static void guest_release_serr_irq(struct cxl_afu *afu)
291{
292 cxl_unmap_irq(afu->serr_virq, afu);
293 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
294 kfree(afu->err_irq_name);
295}
296
297static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
298{
299 return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
300 tfc >> 32, (psl_reset_mask != 0));
301}
302
303static void disable_afu_irqs(struct cxl_context *ctx)
304{
305 irq_hw_number_t hwirq;
306 unsigned int virq;
307 int r, i;
308
309 pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
310 for (r = 0; r < CXL_IRQ_RANGES; r++) {
311 hwirq = ctx->irqs.offset[r];
312 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
313 virq = irq_find_mapping(NULL, hwirq);
314 disable_irq(virq);
315 }
316 }
317}
318
319static void enable_afu_irqs(struct cxl_context *ctx)
320{
321 irq_hw_number_t hwirq;
322 unsigned int virq;
323 int r, i;
324
325 pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
326 for (r = 0; r < CXL_IRQ_RANGES; r++) {
327 hwirq = ctx->irqs.offset[r];
328 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
329 virq = irq_find_mapping(NULL, hwirq);
330 enable_irq(virq);
331 }
332 }
333}
334
335static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
336 u64 offset, u64 *val)
337{
338 unsigned long cr;
339 char c;
340 int rc = 0;
341
342 if (afu->crs_len < sz)
343 return -ENOENT;
344
345 if (unlikely(offset >= afu->crs_len))
346 return -ERANGE;
347
348 cr = get_zeroed_page(GFP_KERNEL);
349 if (!cr)
350 return -ENOMEM;
351
352 rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
353 virt_to_phys((void *)cr), sz);
354 if (rc)
355 goto err;
356
357 switch (sz) {
358 case 1:
359 c = *((char *) cr);
360 *val = c;
361 break;
362 case 2:
363 *val = in_le16((u16 *)cr);
364 break;
365 case 4:
366 *val = in_le32((unsigned *)cr);
367 break;
368 case 8:
369 *val = in_le64((u64 *)cr);
370 break;
371 default:
372 WARN_ON(1);
373 }
374err:
375 free_page(cr);
376 return rc;
377}
378
379static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
380 u32 *out)
381{
382 int rc;
383 u64 val;
384
385 rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
386 if (!rc)
387 *out = (u32) val;
388 return rc;
389}
390
391static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
392 u16 *out)
393{
394 int rc;
395 u64 val;
396
397 rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
398 if (!rc)
399 *out = (u16) val;
400 return rc;
401}
402
403static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
404 u8 *out)
405{
406 int rc;
407 u64 val;
408
409 rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
410 if (!rc)
411 *out = (u8) val;
412 return rc;
413}
414
415static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
416 u64 *out)
417{
418 return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
419}
420
421static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
422{
423 struct cxl_process_element_hcall *elem;
424 struct cxl *adapter = ctx->afu->adapter;
425 const struct cred *cred;
426 u32 pid, idx;
427 int rc, r, i;
428 u64 mmio_addr, mmio_size;
429 __be64 flags = 0;
430
431 /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
432 if (!(elem = (struct cxl_process_element_hcall *)
433 get_zeroed_page(GFP_KERNEL)))
434 return -ENOMEM;
435
436 elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
437 if (ctx->kernel) {
438 pid = 0;
439 flags |= CXL_PE_TRANSLATION_ENABLED;
440 flags |= CXL_PE_PRIVILEGED_PROCESS;
441 if (mfmsr() & MSR_SF)
442 flags |= CXL_PE_64_BIT;
443 } else {
444 pid = current->pid;
445 flags |= CXL_PE_PROBLEM_STATE;
446 flags |= CXL_PE_TRANSLATION_ENABLED;
447 if (!test_tsk_thread_flag(current, TIF_32BIT))
448 flags |= CXL_PE_64_BIT;
449 cred = get_current_cred();
450 if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
451 flags |= CXL_PE_PRIVILEGED_PROCESS;
452 put_cred(cred);
453 }
454 elem->flags = cpu_to_be64(flags);
455 elem->common.tid = cpu_to_be32(0); /* Unused */
456 elem->common.pid = cpu_to_be32(pid);
457 elem->common.csrp = cpu_to_be64(0); /* disable */
458 elem->common.aurp0 = cpu_to_be64(0); /* disable */
459 elem->common.aurp1 = cpu_to_be64(0); /* disable */
460
461 cxl_prefault(ctx, wed);
462
463 elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
464 elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
465 for (r = 0; r < CXL_IRQ_RANGES; r++) {
466 for (i = 0; i < ctx->irqs.range[r]; i++) {
467 if (r == 0 && i == 0) {
468 elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
469 } else {
470 idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
471 elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
472 }
473 }
474 }
475 elem->common.amr = cpu_to_be64(amr);
476 elem->common.wed = cpu_to_be64(wed);
477
478 disable_afu_irqs(ctx);
479
480 rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
481 &ctx->process_token, &mmio_addr, &mmio_size);
482 if (rc == H_SUCCESS) {
483 if (ctx->master || !ctx->afu->pp_psa) {
484 ctx->psn_phys = ctx->afu->psn_phys;
485 ctx->psn_size = ctx->afu->adapter->ps_size;
486 } else {
487 ctx->psn_phys = mmio_addr;
488 ctx->psn_size = mmio_size;
489 }
490 if (ctx->afu->pp_psa && mmio_size &&
491 ctx->afu->pp_size == 0) {
492 /*
493 * There's no property in the device tree to read the
494 * pp_size. We only find out at the 1st attach.
495 * Compared to bare-metal, it is too late and we
496 * should really lock here. However, on powerVM,
497 * pp_size is really only used to display in /sys.
498 * Being discussed with pHyp for their next release.
499 */
500 ctx->afu->pp_size = mmio_size;
501 }
502 /* from PAPR: process element is bytes 4-7 of process token */
503 ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
504 pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
505 ctx->pe, ctx->external_pe, ctx->psn_size);
506 ctx->pe_inserted = true;
507 enable_afu_irqs(ctx);
508 }
509
510 free_page((u64)elem);
511 return rc;
512}
513
514static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
515{
516 pr_devel("in %s\n", __func__);
517
518 ctx->kernel = kernel;
519 if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
520 return attach_afu_directed(ctx, wed, amr);
521
522 /* dedicated mode not supported on FW840 */
523
524 return -EINVAL;
525}
526
527static int detach_afu_directed(struct cxl_context *ctx)
528{
529 if (!ctx->pe_inserted)
530 return 0;
531 if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
532 return -1;
533 return 0;
534}
535
536static int guest_detach_process(struct cxl_context *ctx)
537{
538 pr_devel("in %s\n", __func__);
539 trace_cxl_detach(ctx);
540
541 if (!cxl_ops->link_ok(ctx->afu->adapter))
542 return -EIO;
543
544 if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
545 return detach_afu_directed(ctx);
546
547 return -EINVAL;
548}
549
550static void guest_release_afu(struct device *dev)
551{
552 struct cxl_afu *afu = to_cxl_afu(dev);
553
554 pr_devel("%s\n", __func__);
555
556 idr_destroy(&afu->contexts_idr);
557
558 kfree(afu->guest);
559 kfree(afu);
560}
561
562ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
563{
564 return guest_collect_vpd(NULL, afu, buf, len);
565}
566
567#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
568static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
569 loff_t off, size_t count)
570{
571 void *tbuf = NULL;
572 int rc = 0;
573
574 tbuf = (void *) get_zeroed_page(GFP_KERNEL);
575 if (!tbuf)
576 return -ENOMEM;
577
578 rc = cxl_h_get_afu_err(afu->guest->handle,
579 off & 0x7,
580 virt_to_phys(tbuf),
581 count);
582 if (rc)
583 goto err;
584
585 if (count > ERR_BUFF_MAX_COPY_SIZE)
586 count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
587 memcpy(buf, tbuf, count);
588err:
589 free_page((u64)tbuf);
590
591 return rc;
592}
593
/* Nothing to do in a guest: pHyp manages AFU enablement. */
static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}
598
599static int activate_afu_directed(struct cxl_afu *afu)
600{
601 int rc;
602
603 dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);
604
605 afu->current_mode = CXL_MODE_DIRECTED;
606
607 afu->num_procs = afu->max_procs_virtualised;
608
609 if ((rc = cxl_chardev_m_afu_add(afu)))
610 return rc;
611
612 if ((rc = cxl_sysfs_afu_m_add(afu)))
613 goto err;
614
615 if ((rc = cxl_chardev_s_afu_add(afu)))
616 goto err1;
617
618 return 0;
619err1:
620 cxl_sysfs_afu_m_remove(afu);
621err:
622 cxl_chardev_afu_remove(afu);
623 return rc;
624}
625
626static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
627{
628 if (!mode)
629 return 0;
630 if (!(mode & afu->modes_supported))
631 return -EINVAL;
632
633 if (mode == CXL_MODE_DIRECTED)
634 return activate_afu_directed(afu);
635
636 if (mode == CXL_MODE_DEDICATED)
637 dev_err(&afu->dev, "Dedicated mode not supported\n");
638
639 return -EINVAL;
640}
641
642static int deactivate_afu_directed(struct cxl_afu *afu)
643{
644 dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);
645
646 afu->current_mode = 0;
647 afu->num_procs = 0;
648
649 cxl_sysfs_afu_m_remove(afu);
650 cxl_chardev_afu_remove(afu);
651
652 cxl_ops->afu_reset(afu);
653
654 return 0;
655}
656
657static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
658{
659 if (!mode)
660 return 0;
661 if (!(mode & afu->modes_supported))
662 return -EINVAL;
663
664 if (mode == CXL_MODE_DIRECTED)
665 return deactivate_afu_directed(afu);
666 return 0;
667}
668
669static int guest_afu_reset(struct cxl_afu *afu)
670{
671 pr_devel("AFU(%d) reset request\n", afu->slice);
672 return cxl_h_reset_afu(afu->guest->handle);
673}
674
675static int guest_map_slice_regs(struct cxl_afu *afu)
676{
677 if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
678 dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
679 afu->slice);
680 return -ENOMEM;
681 }
682 return 0;
683}
684
685static void guest_unmap_slice_regs(struct cxl_afu *afu)
686{
687 if (afu->p2n_mmio)
688 iounmap(afu->p2n_mmio);
689}
690
691static bool guest_link_ok(struct cxl *cxl)
692{
693 return true;
694}
695
696static int afu_properties_look_ok(struct cxl_afu *afu)
697{
698 if (afu->pp_irqs < 0) {
699 dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
700 return -EINVAL;
701 }
702
703 if (afu->max_procs_virtualised < 1) {
704 dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
705 return -EINVAL;
706 }
707
708 if (afu->crs_len < 0) {
709 dev_err(&afu->dev, "Unexpected configuration record size value\n");
710 return -EINVAL;
711 }
712
713 return 0;
714}
715
716int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
717{
718 struct cxl_afu *afu;
719 bool free = true;
720 int rc;
721
722 pr_devel("in %s - AFU(%d)\n", __func__, slice);
723 if (!(afu = cxl_alloc_afu(adapter, slice)))
724 return -ENOMEM;
725
726 if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
727 kfree(afu);
728 return -ENOMEM;
729 }
730
731 if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
732 adapter->adapter_num,
733 slice)))
734 goto err1;
735
736 adapter->slices++;
737
738 if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
739 goto err1;
740
741 if ((rc = cxl_ops->afu_reset(afu)))
742 goto err1;
743
744 if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
745 goto err1;
746
747 if ((rc = afu_properties_look_ok(afu)))
748 goto err1;
749
750 if ((rc = guest_map_slice_regs(afu)))
751 goto err1;
752
753 if ((rc = guest_register_serr_irq(afu)))
754 goto err2;
755
756 /*
757 * After we call this function we must not free the afu directly, even
758 * if it returns an error!
759 */
760 if ((rc = cxl_register_afu(afu)))
761 goto err_put1;
762
763 if ((rc = cxl_sysfs_afu_add(afu)))
764 goto err_put1;
765
766 /*
767 * pHyp doesn't expose the programming models supported by the
768 * AFU. pHyp currently only supports directed mode. If it adds
769 * dedicated mode later, this version of cxl has no way to
770 * detect it. So we'll initialize the driver, but the first
771 * attach will fail.
772 * Being discussed with pHyp to do better (likely new property)
773 */
774 if (afu->max_procs_virtualised == 1)
775 afu->modes_supported = CXL_MODE_DEDICATED;
776 else
777 afu->modes_supported = CXL_MODE_DIRECTED;
778
779 if ((rc = cxl_afu_select_best_mode(afu)))
780 goto err_put2;
781
782 adapter->afu[afu->slice] = afu;
783
784 afu->enabled = true;
785
786 return 0;
787
788err_put2:
789 cxl_sysfs_afu_remove(afu);
790err_put1:
791 device_unregister(&afu->dev);
792 free = false;
793 guest_release_serr_irq(afu);
794err2:
795 guest_unmap_slice_regs(afu);
796err1:
797 if (free) {
798 kfree(afu->guest);
799 kfree(afu);
800 }
801 return rc;
802}
803
804void cxl_guest_remove_afu(struct cxl_afu *afu)
805{
806 pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
807
808 if (!afu)
809 return;
810
811 cxl_sysfs_afu_remove(afu);
812
813 spin_lock(&afu->adapter->afu_list_lock);
814 afu->adapter->afu[afu->slice] = NULL;
815 spin_unlock(&afu->adapter->afu_list_lock);
816
817 cxl_context_detach_all(afu);
818 cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
819 guest_release_serr_irq(afu);
820 guest_unmap_slice_regs(afu);
821
822 device_unregister(&afu->dev);
823}
824
825static void free_adapter(struct cxl *adapter)
826{
827 struct irq_avail *cur;
828 int i;
829
830 if (adapter->guest->irq_avail) {
831 for (i = 0; i < adapter->guest->irq_nranges; i++) {
832 cur = &adapter->guest->irq_avail[i];
833 kfree(cur->bitmap);
834 }
835 kfree(adapter->guest->irq_avail);
836 }
837 kfree(adapter->guest->status);
838 cxl_remove_adapter_nr(adapter);
839 kfree(adapter->guest);
840 kfree(adapter);
841}
842
843static int properties_look_ok(struct cxl *adapter)
844{
845 /* The absence of this property means that the operational
846 * status is unknown or okay
847 */
848 if (strlen(adapter->guest->status) &&
849 strcmp(adapter->guest->status, "okay")) {
850 pr_err("ABORTING:Bad operational status of the device\n");
851 return -EINVAL;
852 }
853
854 return 0;
855}
856
857ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
858{
859 return guest_collect_vpd(adapter, NULL, buf, len);
860}
861
862void cxl_guest_remove_adapter(struct cxl *adapter)
863{
864 pr_devel("in %s\n", __func__);
865
866 cxl_sysfs_adapter_remove(adapter);
867
868 device_unregister(&adapter->dev);
869}
870
/* Device release callback for the adapter. */
static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}
875
876struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
877{
878 struct cxl *adapter;
879 bool free = true;
880 int rc;
881
882 if (!(adapter = cxl_alloc_adapter()))
883 return ERR_PTR(-ENOMEM);
884
885 if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
886 free_adapter(adapter);
887 return ERR_PTR(-ENOMEM);
888 }
889
890 adapter->slices = 0;
891 adapter->guest->pdev = pdev;
892 adapter->dev.parent = &pdev->dev;
893 adapter->dev.release = release_adapter;
894 dev_set_drvdata(&pdev->dev, adapter);
895
896 if ((rc = cxl_of_read_adapter_handle(adapter, np)))
897 goto err1;
898
899 if ((rc = cxl_of_read_adapter_properties(adapter, np)))
900 goto err1;
901
902 if ((rc = properties_look_ok(adapter)))
903 goto err1;
904
905 /*
906 * After we call this function we must not free the adapter directly,
907 * even if it returns an error!
908 */
909 if ((rc = cxl_register_adapter(adapter)))
910 goto err_put1;
911
912 if ((rc = cxl_sysfs_adapter_add(adapter)))
913 goto err_put1;
914
915 return adapter;
916
917err_put1:
918 device_unregister(&adapter->dev);
919 free = false;
920err1:
921 if (free)
922 free_adapter(adapter);
923 return ERR_PTR(rc);
924}
925
926const struct cxl_backend_ops cxl_guest_ops = {
927 .module = THIS_MODULE,
928 .adapter_reset = guest_reset,
929 .alloc_one_irq = guest_alloc_one_irq,
930 .release_one_irq = guest_release_one_irq,
931 .alloc_irq_ranges = guest_alloc_irq_ranges,
932 .release_irq_ranges = guest_release_irq_ranges,
933 .setup_irq = NULL,
934 .handle_psl_slice_error = guest_handle_psl_slice_error,
935 .psl_interrupt = guest_psl_irq,
936 .ack_irq = guest_ack_irq,
937 .attach_process = guest_attach_process,
938 .detach_process = guest_detach_process,
939 .link_ok = guest_link_ok,
940 .release_afu = guest_release_afu,
941 .afu_read_err_buffer = guest_afu_read_err_buffer,
942 .afu_check_and_enable = guest_afu_check_and_enable,
943 .afu_activate_mode = guest_afu_activate_mode,
944 .afu_deactivate_mode = guest_afu_deactivate_mode,
945 .afu_reset = guest_afu_reset,
946 .afu_cr_read8 = guest_afu_cr_read8,
947 .afu_cr_read16 = guest_afu_cr_read16,
948 .afu_cr_read32 = guest_afu_cr_read32,
949 .afu_cr_read64 = guest_afu_cr_read64,
950};
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
index 7e4c517f928e..24131e2537c5 100644
--- a/drivers/misc/cxl/hcalls.c
+++ b/drivers/misc/cxl/hcalls.c
@@ -11,7 +11,6 @@
11#include <linux/compiler.h> 11#include <linux/compiler.h>
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <asm/hvcall.h>
15#include <asm/byteorder.h> 14#include <asm/byteorder.h>
16#include "hcalls.h" 15#include "hcalls.h"
17 16
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
index 6bfab323578d..3e25522a5df6 100644
--- a/drivers/misc/cxl/hcalls.h
+++ b/drivers/misc/cxl/hcalls.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <asm/byteorder.h> 14#include <asm/byteorder.h>
15#include <asm/hvcall.h>
15#include "cxl.h" 16#include "cxl.h"
16 17
17#define SG_BUFFER_SIZE 4096 18#define SG_BUFFER_SIZE 4096
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 927ba5a954f6..14b15835dc4a 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -285,9 +285,6 @@ static int __init init_cxl(void)
285{ 285{
286 int rc = 0; 286 int rc = 0;
287 287
288 if (!cpu_has_feature(CPU_FTR_HVMODE))
289 return -EPERM;
290
291 if ((rc = cxl_file_init())) 288 if ((rc = cxl_file_init()))
292 return rc; 289 return rc;
293 290
@@ -296,8 +293,17 @@ static int __init init_cxl(void)
296 if ((rc = register_cxl_calls(&cxl_calls))) 293 if ((rc = register_cxl_calls(&cxl_calls)))
297 goto err; 294 goto err;
298 295
299 cxl_ops = &cxl_native_ops; 296 if (cpu_has_feature(CPU_FTR_HVMODE)) {
300 if ((rc = pci_register_driver(&cxl_pci_driver))) 297 cxl_ops = &cxl_native_ops;
298 rc = pci_register_driver(&cxl_pci_driver);
299 }
300#ifdef CONFIG_PPC_PSERIES
301 else {
302 cxl_ops = &cxl_guest_ops;
303 rc = platform_driver_register(&cxl_of_driver);
304 }
305#endif
306 if (rc)
301 goto err1; 307 goto err1;
302 308
303 return 0; 309 return 0;
@@ -312,7 +318,12 @@ err:
312 318
313static void exit_cxl(void) 319static void exit_cxl(void)
314{ 320{
315 pci_unregister_driver(&cxl_pci_driver); 321 if (cpu_has_feature(CPU_FTR_HVMODE))
322 pci_unregister_driver(&cxl_pci_driver);
323#ifdef CONFIG_PPC_PSERIES
324 else
325 platform_driver_unregister(&cxl_of_driver);
326#endif
316 327
317 cxl_debugfs_exit(); 328 cxl_debugfs_exit();
318 cxl_file_exit(); 329 cxl_file_exit();
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
new file mode 100644
index 000000000000..edc458395f68
--- /dev/null
+++ b/drivers/misc/cxl/of.c
@@ -0,0 +1,513 @@
1/*
2 * Copyright 2015 IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/platform_device.h>
13#include <linux/slab.h>
14#include <linux/of_address.h>
15#include <linux/of_platform.h>
16
17#include "cxl.h"
18
19
20static const __be32 *read_prop_string(const struct device_node *np,
21 const char *prop_name)
22{
23 const __be32 *prop;
24
25 prop = of_get_property(np, prop_name, NULL);
26 if (cxl_verbose && prop)
27 pr_info("%s: %s\n", prop_name, (char *) prop);
28 return prop;
29}
30
31static const __be32 *read_prop_dword(const struct device_node *np,
32 const char *prop_name, u32 *val)
33{
34 const __be32 *prop;
35
36 prop = of_get_property(np, prop_name, NULL);
37 if (prop)
38 *val = be32_to_cpu(prop[0]);
39 if (cxl_verbose && prop)
40 pr_info("%s: %#x (%u)\n", prop_name, *val, *val);
41 return prop;
42}
43
44static const __be64 *read_prop64_dword(const struct device_node *np,
45 const char *prop_name, u64 *val)
46{
47 const __be64 *prop;
48
49 prop = of_get_property(np, prop_name, NULL);
50 if (prop)
51 *val = be64_to_cpu(prop[0]);
52 if (cxl_verbose && prop)
53 pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val);
54 return prop;
55}
56
57
58static int read_handle(struct device_node *np, u64 *handle)
59{
60 const __be32 *prop;
61 u64 size;
62
63 /* Get address and size of the node */
64 prop = of_get_address(np, 0, &size, NULL);
65 if (size)
66 return -EINVAL;
67
68 /* Helper to read a big number; size is in cells (not bytes) */
69 *handle = of_read_number(prop, of_n_addr_cells(np));
70 return 0;
71}
72
/*
 * Parse an OF address property (@prop_name is "reg" or
 * "assigned-addresses") of an AFU node and record each entry on @afu.
 * The first address cell of each entry carries a type code:
 *   0 - unit address (AFU handle)
 *   1 - p2 (per-process MMIO) area: base offset and size
 *   2 - problem state area: base offset and size
 *
 * Returns 0 on success (including when the property is absent),
 * -EINVAL on an unrecognised address type.
 */
static int read_phys_addr(struct device_node *np, char *prop_name,
			  struct cxl_afu *afu)
{
	int i, len, entry_size, naddr, nsize, type;
	u64 addr, size;
	const __be32 *prop;

	naddr = of_n_addr_cells(np);
	nsize = of_n_size_cells(np);

	prop = of_get_property(np, prop_name, &len);
	if (prop) {
		entry_size = naddr + nsize;	/* cells per entry */
		/* 'len' is in bytes; 'i' counts 4-byte cells */
		for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) {
			type = be32_to_cpu(prop[0]);
			addr = of_read_number(prop, naddr);
			size = of_read_number(&prop[naddr], nsize);
			switch (type) {
			case 0: /* unit address */
				afu->guest->handle = addr;
				break;
			case 1: /* p2 area */
				/* += : addr is an offset added to the base set earlier */
				afu->guest->p2n_phys += addr;
				afu->guest->p2n_size = size;
				break;
			case 2: /* problem state area */
				afu->psn_phys += addr;
				afu->adapter->ps_size = size;
				break;
			default:
				pr_err("Invalid address type %d found in %s property of AFU\n",
					type, prop_name);
				return -EINVAL;
			}
			if (cxl_verbose)
				pr_info("%s: %#x %#llx (size %#llx)\n",
					prop_name, type, addr, size);
		}
	}
	return 0;
}
114
/*
 * Read (up to 256 bytes of) VPD for either @adapter or @afu - exactly
 * one of the two must be non-NULL - and dump it to the debug log.
 * Returns 0 on success, a negative error code otherwise.
 */
static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
{
	int rc;
	char vpd[256] = { 0 };

	rc = adapter ? cxl_guest_read_adapter_vpd(adapter, vpd, sizeof(vpd)) :
		       cxl_guest_read_afu_vpd(afu, vpd, sizeof(vpd));

	/* A positive return is the number of VPD bytes actually read */
	if (rc > 0) {
		cxl_dump_debug_buffer(vpd, rc);
		rc = 0;
	}
	return rc;
}
134
135int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
136{
137 if (read_handle(afu_np, &afu->guest->handle))
138 return -EINVAL;
139 pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle);
140
141 return 0;
142}
143
/*
 * Populate @afu from the 'ibm,coherent-platform-function' node @np.
 * Properties are read in the same order as listed in PAPR.  Many of the
 * reads under cxl_verbose are purely informational (logged, then
 * discarded via the scratch 'val').
 *
 * Returns 0 on success, or a negative error code from address parsing.
 */
int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
{
	int i, len, rc;
	char *p;
	const __be32 *prop;
	u16 device_id, vendor_id;
	u32 val = 0, class_code;

	/* Properties are read in the same order as listed in PAPR */

	if (cxl_verbose) {
		pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n");

		/* "compatible" is a list of NUL-terminated strings */
		prop = of_get_property(np, "compatible", &len);
		i = 0;
		while (i < len) {
			p = (char *) prop + i;
			pr_info("compatible: %s\n", p);
			i += strlen(p) + 1;
		}
		read_prop_string(np, "name");
	}

	rc = read_phys_addr(np, "reg", afu);
	if (rc)
		return rc;

	rc = read_phys_addr(np, "assigned-addresses", afu);
	if (rc)
		return rc;

	/* A zero problem-state address means no problem state area */
	if (afu->psn_phys == 0)
		afu->psa = false;
	else
		afu->psa = true;

	if (cxl_verbose) {
		read_prop_string(np, "ibm,loc-code");
		read_prop_string(np, "device_type");
	}

	read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised);

	if (cxl_verbose) {
		read_prop_dword(np, "ibm,scratchpad-size", &val);
		read_prop_dword(np, "ibm,programmable", &val);
		read_prop_string(np, "ibm,phandle");
		read_vpd(NULL, afu);
	}

	read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
	afu->irqs_max = afu->guest->max_ints;

	prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs);
	if (prop) {
		/* One extra interrupt for the PSL interrupt is already
		 * included. Remove it now to keep only AFU interrupts and
		 * match the native case.
		 */
		afu->pp_irqs--;
	}

	if (cxl_verbose) {
		read_prop_dword(np, "ibm,max-ints", &val);
		read_prop_dword(np, "ibm,vpd-size", &val);
	}

	read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len);
	afu->eb_offset = 0;

	if (cxl_verbose)
		read_prop_dword(np, "ibm,config-record-type", &val);

	read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len);
	afu->crs_offset = 0;

	read_prop_dword(np, "ibm,#config-records", &afu->crs_num);

	if (cxl_verbose) {
		/* Dump each config record's PCI-style identity fields */
		for (i = 0; i < afu->crs_num; i++) {
			rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID,
						    &device_id);
			if (!rc)
				pr_info("record %d - device-id: %#x\n",
					i, device_id);
			rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID,
						    &vendor_id);
			if (!rc)
				pr_info("record %d - vendor-id: %#x\n",
					i, vendor_id);
			rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION,
						    &class_code);
			if (!rc) {
				/* drop the revision byte, keep the class code */
				class_code >>= 8;
				pr_info("record %d - class-code: %#x\n",
					i, class_code);
			}
		}

		read_prop_dword(np, "ibm,function-number", &val);
		read_prop_dword(np, "ibm,privileged-function", &val);
		read_prop_dword(np, "vendor-id", &val);
		read_prop_dword(np, "device-id", &val);
		read_prop_dword(np, "revision-id", &val);
		read_prop_dword(np, "class-code", &val);
		read_prop_dword(np, "subsystem-vendor-id", &val);
		read_prop_dword(np, "subsystem-id", &val);
	}
	/*
	 * if "ibm,process-mmio" doesn't exist then per-process mmio is
	 * not supported
	 */
	val = 0;
	prop = read_prop_dword(np, "ibm,process-mmio", &val);
	if (prop && val == 1)
		afu->pp_psa = true;
	else
		afu->pp_psa = false;

	if (cxl_verbose) {
		read_prop_dword(np, "ibm,supports-aur", &val);
		read_prop_dword(np, "ibm,supports-csrp", &val);
		read_prop_dword(np, "ibm,supports-prr", &val);
	}

	prop = read_prop_dword(np, "ibm,function-error-interrupt", &val);
	if (prop)
		afu->serr_hwirq = val;

	pr_devel("AFU handle: %#llx\n", afu->guest->handle);
	pr_devel("p2n_phys: %#llx (size %#llx)\n",
		afu->guest->p2n_phys, afu->guest->p2n_size);
	pr_devel("psn_phys: %#llx (size %#llx)\n",
		afu->psn_phys, afu->adapter->ps_size);
	pr_devel("Max number of processes virtualised=%i\n",
		afu->max_procs_virtualised);
	pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs,
		 afu->irqs_max);
	pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq);

	return 0;
}
286
/*
 * Parse the adapter's "interrupt-ranges" property into the
 * adapter->guest->irq_avail array: one struct irq_avail (offset, range,
 * allocation bitmap) per range.  Also records the smallest offset seen
 * as irq_base_offset and initialises the allocation lock.
 *
 * Returns 0 on success, -EINVAL for a missing/malformed property,
 * -ENOMEM on allocation failure (with any partial allocations freed).
 */
static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
{
	const __be32 *ranges;
	int len, nranges, i;
	struct irq_avail *cur;

	ranges = of_get_property(np, "interrupt-ranges", &len);
	if (ranges == NULL || len < (2 * sizeof(int)))
		return -EINVAL;

	/*
	 * encoded array of two cells per entry, each cell encoded as
	 * with encode-int
	 */
	nranges = len / (2 * sizeof(int));
	/* reject a trailing partial entry */
	if (nranges == 0 || (nranges * 2 * sizeof(int)) != len)
		return -EINVAL;

	adapter->guest->irq_avail = kzalloc(nranges * sizeof(struct irq_avail),
					    GFP_KERNEL);
	if (adapter->guest->irq_avail == NULL)
		return -ENOMEM;

	/* seed with the first offset; reduced to the minimum in the loop */
	adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]);
	for (i = 0; i < nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		cur->offset = be32_to_cpu(ranges[i * 2]);
		cur->range  = be32_to_cpu(ranges[i * 2 + 1]);
		cur->bitmap = kcalloc(BITS_TO_LONGS(cur->range),
				sizeof(*cur->bitmap), GFP_KERNEL);
		if (cur->bitmap == NULL)
			goto err;
		if (cur->offset < adapter->guest->irq_base_offset)
			adapter->guest->irq_base_offset = cur->offset;
		if (cxl_verbose)
			pr_info("available IRQ range: %#lx-%#lx (%lu)\n",
				cur->offset, cur->offset + cur->range - 1,
				cur->range);
	}
	adapter->guest->irq_nranges = nranges;
	spin_lock_init(&adapter->guest->irq_alloc_lock);

	return 0;
err:
	/* free the bitmaps allocated so far (entry i failed), then the array */
	for (i--; i >= 0; i--) {
		cur = &adapter->guest->irq_avail[i];
		kfree(cur->bitmap);
	}
	kfree(adapter->guest->irq_avail);
	adapter->guest->irq_avail = NULL;
	return -ENOMEM;
}
339
340int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
341{
342 if (read_handle(np, &adapter->guest->handle))
343 return -EINVAL;
344 pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle);
345
346 return 0;
347}
348
/*
 * Populate @adapter from the 'ibm,coherent-platform-facility' node @np.
 * Properties are read in the same order as listed in PAPR; reads done
 * only under cxl_verbose are informational (logged, then discarded via
 * the scratch 'val').
 *
 * Returns 0 on success, -ENOMEM if the status string cannot be copied,
 * or the error from interrupt-range parsing.
 */
int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
{
	int rc, len, naddr, i;
	char *p;
	const __be32 *prop;
	u32 val = 0;

	/* Properties are read in the same order as listed in PAPR */

	naddr = of_n_addr_cells(np);

	if (cxl_verbose) {
		pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n");

		read_prop_dword(np, "#address-cells", &val);
		read_prop_dword(np, "#size-cells", &val);

		/* "compatible" is a list of NUL-terminated strings */
		prop = of_get_property(np, "compatible", &len);
		i = 0;
		while (i < len) {
			p = (char *) prop + i;
			pr_info("compatible: %s\n", p);
			i += strlen(p) + 1;
		}
		read_prop_string(np, "name");
		read_prop_string(np, "model");

		prop = of_get_property(np, "reg", NULL);
		if (prop) {
			pr_info("reg: addr:%#llx size:%#x\n",
				of_read_number(prop, naddr),
				be32_to_cpu(prop[naddr]));
		}

		read_prop_string(np, "ibm,loc-code");
	}

	if ((rc = read_adapter_irq_config(adapter, np)))
		return rc;

	if (cxl_verbose) {
		read_prop_string(np, "device_type");
		read_prop_string(np, "ibm,phandle");
	}

	/* CAIA version is packed as major:minor in one 16-bit value */
	prop = read_prop_dword(np, "ibm,caia-version", &val);
	if (prop) {
		adapter->caia_major = (val & 0xFF00) >> 8;
		adapter->caia_minor = val & 0xFF;
	}

	prop = read_prop_dword(np, "ibm,psl-revision", &val);
	if (prop)
		adapter->psl_rev = val;

	prop = read_prop_string(np, "status");
	if (prop) {
		/* keep our own copy; the property belongs to the DT */
		adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop);
		if (adapter->guest->status == NULL)
			return -ENOMEM;
	}

	prop = read_prop_dword(np, "vendor-id", &val);
	if (prop)
		adapter->guest->vendor = val;

	prop = read_prop_dword(np, "device-id", &val);
	if (prop)
		adapter->guest->device = val;

	if (cxl_verbose) {
		read_prop_dword(np, "ibm,privileged-facility", &val);
		read_prop_dword(np, "revision-id", &val);
		read_prop_dword(np, "class-code", &val);
	}

	prop = read_prop_dword(np, "subsystem-vendor-id", &val);
	if (prop)
		adapter->guest->subsystem_vendor = val;

	prop = read_prop_dword(np, "subsystem-id", &val);
	if (prop)
		adapter->guest->subsystem = val;

	if (cxl_verbose)
		read_vpd(adapter, NULL);

	return 0;
}
438
439static int cxl_of_remove(struct platform_device *pdev)
440{
441 struct cxl *adapter;
442 int afu;
443
444 adapter = dev_get_drvdata(&pdev->dev);
445 for (afu = 0; afu < adapter->slices; afu++)
446 cxl_guest_remove_afu(adapter->afu[afu]);
447
448 cxl_guest_remove_adapter(adapter);
449 return 0;
450}
451
/* Shutdown does the same full teardown as remove. */
static void cxl_of_shutdown(struct platform_device *pdev)
{
	cxl_of_remove(pdev);
}
456
457int cxl_of_probe(struct platform_device *pdev)
458{
459 struct device_node *np = NULL;
460 struct device_node *afu_np = NULL;
461 struct cxl *adapter = NULL;
462 int ret;
463 int slice, slice_ok;
464
465 pr_devel("in %s\n", __func__);
466
467 np = pdev->dev.of_node;
468 if (np == NULL)
469 return -ENODEV;
470
471 /* init adapter */
472 adapter = cxl_guest_init_adapter(np, pdev);
473 if (IS_ERR(adapter)) {
474 dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter));
475 return PTR_ERR(adapter);
476 }
477
478 /* init afu */
479 slice_ok = 0;
480 for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) {
481 if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
482 dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
483 slice, ret);
484 else
485 slice_ok++;
486 }
487
488 if (slice_ok == 0) {
489 dev_info(&pdev->dev, "No active AFU");
490 adapter->slices = 0;
491 }
492
493 if (afu_np)
494 of_node_put(afu_np);
495 return 0;
496}
497
/* Match the adapter node exposed by the hypervisor in the device tree. */
static const struct of_device_id cxl_of_match[] = {
	{ .compatible = "ibm,coherent-platform-facility",},
	{},
};
MODULE_DEVICE_TABLE(of, cxl_of_match);

/* Guest-side cxl platform driver; registered from init_cxl() when not
 * running in hypervisor mode. */
struct platform_driver cxl_of_driver = {
	.driver = {
		.name = "cxl_of",
		.of_match_table = cxl_of_match,
		.owner = THIS_MODULE
	},
	.probe = cxl_of_probe,
	.remove = cxl_of_remove,
	.shutdown = cxl_of_shutdown,
};