author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-19 18:38:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-19 18:38:41 -0400
commit     d5e2d00898bdfed9586472679760fc81a2ca2d02 (patch)
tree       55ac781983bf7144230ad8a5a995ce02b6ac39a1 /drivers/misc
parent     31e182363b39d84031eadf0caf6d99fd9eb056f0 (diff)
parent     6e669f085d595cb6053920832c89f1a13067db44 (diff)
Merge tag 'powerpc-4.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"This was delayed a day or two by some build-breakage on old toolchains
which we've now fixed.
There are two PCI commits, both acked by Bjorn.
There's one commit to mm/hugepage.c which is (co)authored by Kirill.
Highlights:
- Restructure Linux PTE on Book3S/64 to Radix format from Paul
Mackerras
- Book3s 64 MMU cleanup in preparation for Radix MMU from Aneesh
Kumar K.V
- Add POWER9 cputable entry from Michael Neuling
- FPU/Altivec/VSX save/restore optimisations from Cyril Bur
- Add support for new ftrace ABI on ppc64le from Torsten Duwe
Various cleanups & minor fixes from:
- Adam Buchbinder, Andrew Donnellan, Balbir Singh, Christophe Leroy,
Cyril Bur, Luis Henriques, Madhavan Srinivasan, Pan Xinhui, Russell
Currey, Sukadev Bhattiprolu, Suraj Jitindar Singh.
General:
- atomics: Allow architectures to define their own __atomic_op_*
helpers from Boqun Feng
- Implement atomic{,64}_*_return_* variants and acquire/release/relaxed
variants for (cmp)xchg from Boqun Feng
- Add powernv_defconfig from Jeremy Kerr
- Fix BUG_ON() reporting in real mode from Balbir Singh
- Add xmon command to dump OPAL msglog from Andrew Donnellan
- Add xmon command to dump process/task similar to ps(1) from Douglas
Miller
- Clean up memory hotplug failure paths from David Gibson
pci/eeh:
- Redesign SR-IOV on PowerNV to give absolute isolation between VFs
from Wei Yang.
- EEH Support for SRIOV VFs from Wei Yang and Gavin Shan.
- PCI/IOV: Rename and export virtfn_{add,remove} from Wei Yang
- PCI: Add pcibios_bus_add_device() weak function from Wei Yang
- MAINTAINERS: Update EEH details and maintainership from Russell
Currey
cxl:
- Support added to the CXL driver for running on both bare-metal and
hypervisor systems, from Christophe Lombard and Frederic Barrat.
- Ignore probes for virtual afu pci devices from Vaibhav Jain
perf:
- Export Power8 generic and cache events to sysfs from Sukadev
Bhattiprolu
- hv-24x7: Fix usage with chip events, display change in counter
values, display domain indices in sysfs, eliminate domain suffix in
event names, from Sukadev Bhattiprolu
Freescale:
- Updates from Scott: "Highlights include 8xx optimizations, 32-bit
checksum optimizations, 86xx consolidation, e5500/e6500 cpu
hotplug, more fman and other dt bits, and minor fixes/cleanup""
* tag 'powerpc-4.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (179 commits)
powerpc: Fix unrecoverable SLB miss during restore_math()
powerpc/8xx: Fix do_mtspr_cpu6() build on older compilers
powerpc/rcpm: Fix build break when SMP=n
powerpc/book3e-64: Use hardcoded mttmr opcode
powerpc/fsl/dts: Add "jedec,spi-nor" flash compatible
powerpc/T104xRDB: add tdm riser card node to device tree
powerpc32: PAGE_EXEC required for inittext
powerpc/mpc85xx: Add pcsphy nodes to FManV3 device tree
powerpc/mpc85xx: Add MDIO bus muxing support to the board device tree(s)
powerpc/86xx: Introduce and use common dtsi
powerpc/86xx: Update device tree
powerpc/86xx: Move dts files to fsl directory
powerpc/86xx: Switch to kconfig fragments approach
powerpc/86xx: Update defconfigs
powerpc/86xx: Consolidate common platform code
powerpc32: Remove one insn in mulhdu
powerpc32: small optimisation in flush_icache_range()
powerpc: Simplify test in __dma_sync()
powerpc32: move xxxxx_dcache_range() functions inline
powerpc32: Remove clear_pages() and define clear_page() inline
...
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/cxl/Makefile  |    1
-rw-r--r--  drivers/misc/cxl/api.c     |   83
-rw-r--r--  drivers/misc/cxl/base.c    |   32
-rw-r--r--  drivers/misc/cxl/context.c |   11
-rw-r--r--  drivers/misc/cxl/cxl.h     |  288
-rw-r--r--  drivers/misc/cxl/debugfs.c |    4
-rw-r--r--  drivers/misc/cxl/fault.c   |   25
-rw-r--r--  drivers/misc/cxl/file.c    |   28
-rw-r--r--  drivers/misc/cxl/flash.c   |  538
-rw-r--r--  drivers/misc/cxl/guest.c   | 1177
-rw-r--r--  drivers/misc/cxl/hcalls.c  |  647
-rw-r--r--  drivers/misc/cxl/hcalls.h  |  204
-rw-r--r--  drivers/misc/cxl/irq.c     |  309
-rw-r--r--  drivers/misc/cxl/main.c    |  122
-rw-r--r--  drivers/misc/cxl/native.c  |  469
-rw-r--r--  drivers/misc/cxl/of.c      |  513
-rw-r--r--  drivers/misc/cxl/pci.c     |  267
-rw-r--r--  drivers/misc/cxl/sysfs.c   |  123
-rw-r--r--  drivers/misc/cxl/trace.h   |  193
-rw-r--r--  drivers/misc/cxl/vphb.c    |  167
20 files changed, 4469 insertions, 732 deletions
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index be2ac5ce349f..8a55c1aa11aa 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
 cxl-y += main.o file.o irq.o fault.o native.o
 cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
 cxl-y += vphb.o api.o
+cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
 obj-$(CONFIG_CXL) += cxl.o
 obj-$(CONFIG_CXL_BASE) += base.o
 
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index ea3eeb7011e1..2107c948406d 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -51,8 +51,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
     if (rc)
         goto err_mapping;
 
-    cxl_assign_psn_space(ctx);
-
     return ctx;
 
 err_mapping:
@@ -78,7 +76,6 @@ struct device *cxl_get_phys_dev(struct pci_dev *dev)
 
     return afu->adapter->dev.parent;
 }
-EXPORT_SYMBOL_GPL(cxl_get_phys_dev);
 
 int cxl_release_context(struct cxl_context *ctx)
 {
@@ -91,28 +88,11 @@ int cxl_release_context(struct cxl_context *ctx)
 }
 EXPORT_SYMBOL_GPL(cxl_release_context);
 
-int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
-{
-    if (num == 0)
-        num = ctx->afu->pp_irqs;
-    return afu_allocate_irqs(ctx, num);
-}
-EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
-
-void cxl_free_afu_irqs(struct cxl_context *ctx)
-{
-    afu_irq_name_free(ctx);
-    cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
-}
-EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
-
 static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
 {
     __u16 range;
     int r;
 
-    WARN_ON(num == 0);
-
     for (r = 0; r < CXL_IRQ_RANGES; r++) {
         range = ctx->irqs.range[r];
         if (num < range) {
@@ -123,6 +103,44 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
     return 0;
 }
 
+int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
+{
+    int res;
+    irq_hw_number_t hwirq;
+
+    if (num == 0)
+        num = ctx->afu->pp_irqs;
+    res = afu_allocate_irqs(ctx, num);
+    if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
+        /* In a guest, the PSL interrupt is not multiplexed. It was
+         * allocated above, and we need to set its handler
+         */
+        hwirq = cxl_find_afu_irq(ctx, 0);
+        if (hwirq)
+            cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
+    }
+    return res;
+}
+EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
+
+void cxl_free_afu_irqs(struct cxl_context *ctx)
+{
+    irq_hw_number_t hwirq;
+    unsigned int virq;
+
+    if (!cpu_has_feature(CPU_FTR_HVMODE)) {
+        hwirq = cxl_find_afu_irq(ctx, 0);
+        if (hwirq) {
+            virq = irq_find_mapping(NULL, hwirq);
+            if (virq)
+                cxl_unmap_irq(virq, ctx);
+        }
+    }
+    afu_irq_name_free(ctx);
+    cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+}
+EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
+
 int cxl_map_afu_irq(struct cxl_context *ctx, int num,
             irq_handler_t handler, void *cookie, char *name)
 {
@@ -178,7 +196,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
 
     cxl_ctx_get();
 
-    if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {
+    if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
         put_pid(ctx->pid);
         cxl_ctx_put();
         goto out;
@@ -193,7 +211,7 @@ EXPORT_SYMBOL_GPL(cxl_start_context);
 
 int cxl_process_element(struct cxl_context *ctx)
 {
-    return ctx->pe;
+    return ctx->external_pe;
 }
 EXPORT_SYMBOL_GPL(cxl_process_element);
 
@@ -207,7 +225,6 @@ EXPORT_SYMBOL_GPL(cxl_stop_context);
 void cxl_set_master(struct cxl_context *ctx)
 {
     ctx->master = true;
-    cxl_assign_psn_space(ctx);
 }
 EXPORT_SYMBOL_GPL(cxl_set_master);
 
@@ -325,15 +342,11 @@ EXPORT_SYMBOL_GPL(cxl_start_work);
 
 void __iomem *cxl_psa_map(struct cxl_context *ctx)
 {
-    struct cxl_afu *afu = ctx->afu;
-    int rc;
-
-    rc = cxl_afu_check_and_enable(afu);
-    if (rc)
+    if (ctx->status != STARTED)
         return NULL;
 
     pr_devel("%s: psn_phys%llx size:%llx\n",
-         __func__, afu->psn_phys, afu->adapter->ps_size);
+         __func__, ctx->psn_phys, ctx->psn_size);
     return ioremap(ctx->psn_phys, ctx->psn_size);
 }
 EXPORT_SYMBOL_GPL(cxl_psa_map);
@@ -349,11 +362,11 @@ int cxl_afu_reset(struct cxl_context *ctx)
     struct cxl_afu *afu = ctx->afu;
     int rc;
 
-    rc = __cxl_afu_reset(afu);
+    rc = cxl_ops->afu_reset(afu);
     if (rc)
         return rc;
 
-    return cxl_afu_check_and_enable(afu);
+    return cxl_ops->afu_check_and_enable(afu);
 }
 EXPORT_SYMBOL_GPL(cxl_afu_reset);
 
@@ -363,3 +376,11 @@ void cxl_perst_reloads_same_image(struct cxl_afu *afu,
     afu->adapter->perst_same_image = perst_reloads_same_image;
 }
 EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
+
+ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
+{
+    struct cxl_afu *afu = cxl_pci_to_afu(dev);
+
+    return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
+}
+EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
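The functions above form the in-kernel API that AFU drivers consume. A minimal usage sketch follows, assuming a hypothetical client driver: my_handler, my_probe and the WED value are illustrative, not part of this patch; only the cxl_* calls (exported from api.c, plus cxl_unmap_afu_irq from the same kernel API) are real.

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <misc/cxl.h>

/* Hypothetical handler; the cookie is whatever was passed to
 * cxl_map_afu_irq(), here the context itself. */
static irqreturn_t my_handler(int irq, void *cookie)
{
    return IRQ_HANDLED;
}

static int my_probe(struct pci_dev *dev)
{
    struct cxl_context *ctx;
    int rc, virq;

    ctx = cxl_dev_context_init(dev);
    if (IS_ERR(ctx))
        return PTR_ERR(ctx);

    /* num == 0 asks for all per-process IRQs the AFU advertises;
     * in a guest this also wires up the PSL interrupt (see above) */
    rc = cxl_allocate_afu_irqs(ctx, 0);
    if (rc)
        goto err_ctx;

    /* AFU IRQs are numbered from 1; 0 names the PSL interrupt.
     * A non-positive return means the mapping failed. */
    virq = cxl_map_afu_irq(ctx, 1, my_handler, ctx, "my-afu");
    if (virq <= 0) {
        rc = -ENODEV;
        goto err_irqs;
    }

    rc = cxl_start_context(ctx, 0 /* AFU-specific WED */, NULL);
    if (rc)
        goto err_map;
    return 0;

err_map:
    cxl_unmap_afu_irq(ctx, 1, ctx);
err_irqs:
    cxl_free_afu_irqs(ctx);
err_ctx:
    cxl_release_context(ctx);
    return rc;
}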
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index a9f0dd3255a2..9b90ec6c07cd 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -11,6 +11,7 @@
 #include <linux/rcupdate.h>
 #include <asm/errno.h>
 #include <misc/cxl-base.h>
+#include <linux/of_platform.h>
 #include "cxl.h"
 
 /* protected by rcu */
@@ -84,3 +85,34 @@ void unregister_cxl_calls(struct cxl_calls *calls)
     synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(unregister_cxl_calls);
+
+int cxl_update_properties(struct device_node *dn,
+              struct property *new_prop)
+{
+    return of_update_property(dn, new_prop);
+}
+EXPORT_SYMBOL_GPL(cxl_update_properties);
+
+static int __init cxl_base_init(void)
+{
+    struct device_node *np = NULL;
+    struct platform_device *dev;
+    int count = 0;
+
+    /*
+     * Scan for compatible devices in guest only
+     */
+    if (cpu_has_feature(CPU_FTR_HVMODE))
+        return 0;
+
+    while ((np = of_find_compatible_node(np, NULL,
+                     "ibm,coherent-platform-facility"))) {
+        dev = of_platform_device_create(np, NULL, NULL);
+        if (dev)
+            count++;
+    }
+    pr_devel("Found %d cxl device(s)\n", count);
+    return 0;
+}
+
+module_init(cxl_base_init);
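cxl_base_init() only creates a platform device per "ibm,coherent-platform-facility" node; binding happens through the cxl_of_driver declared in the cxl.h changes later in this patch. A rough sketch of what that registration presumably looks like (the real definition lives in of.c, whose hunks are not shown in this section; only cxl_of_probe is declared in the visible header changes, so the rest is an assumption):

#include <linux/of.h>
#include <linux/platform_device.h>
#include "cxl.h"

/* Sketch (assumption): the compatible string matches the one scanned
 * for in cxl_base_init() above; of.c owns the real table and driver. */
static const struct of_device_id cxl_of_match[] = {
    { .compatible = "ibm,coherent-platform-facility" },
    {},
};

struct platform_driver cxl_of_driver = {
    .driver = {
        .name = "cxl_of",
        .of_match_table = cxl_of_match,
        .owner = THIS_MODULE,
    },
    .probe = cxl_of_probe,
};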
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 262b88eac414..10370f280500 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -95,7 +95,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
         return i;
 
     ctx->pe = i;
-    ctx->elem = &ctx->afu->spa[i];
+    if (cpu_has_feature(CPU_FTR_HVMODE)) {
+        ctx->elem = &ctx->afu->native->spa[i];
+        ctx->external_pe = ctx->pe;
+    } else {
+        ctx->external_pe = -1; /* assigned when attaching */
+    }
     ctx->pe_inserted = false;
 
     /*
@@ -214,8 +219,8 @@ int __detach_context(struct cxl_context *ctx)
     /* Only warn if we detached while the link was OK.
      * If detach fails when hw is down, we don't care.
      */
-    WARN_ON(cxl_detach_process(ctx) &&
-        cxl_adapter_link_ok(ctx->afu->adapter));
+    WARN_ON(cxl_ops->detach_process(ctx) &&
+        cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
     flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 
     /* release the reference to the group leader and mm handling pid */
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a521bc72cec2..38e21cf7806e 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -324,6 +324,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
 #define CXL_MODE_TIME_SLICED 0x4
 #define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED)
 
+#define CXL_DEV_MINORS 13   /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
+#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
+#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
+
 enum cxl_context_status {
     CLOSED,
     OPENED,
@@ -336,6 +340,12 @@ enum prefault_modes {
     CXL_PREFAULT_ALL,
 };
 
+enum cxl_attrs {
+    CXL_ADAPTER_ATTRS,
+    CXL_AFU_MASTER_ATTRS,
+    CXL_AFU_ATTRS,
+};
+
 struct cxl_sste {
     __be64 esid_data;
     __be64 vsid_data;
@@ -344,18 +354,46 @@ struct cxl_sste {
 #define to_cxl_adapter(d) container_of(d, struct cxl, dev)
 #define to_cxl_afu(d) container_of(d, struct cxl_afu, dev)
 
-struct cxl_afu {
+struct cxl_afu_native {
+    void __iomem *p1n_mmio;
+    void __iomem *afu_desc_mmio;
     irq_hw_number_t psl_hwirq;
+    unsigned int psl_virq;
+    struct mutex spa_mutex;
+    /*
+     * Only the first part of the SPA is used for the process element
+     * linked list. The only other part that software needs to worry about
+     * is sw_command_status, which we store a separate pointer to.
+     * Everything else in the SPA is only used by hardware
+     */
+    struct cxl_process_element *spa;
+    __be64 *sw_command_status;
+    unsigned int spa_size;
+    int spa_order;
+    int spa_max_procs;
+    u64 pp_offset;
+};
+
+struct cxl_afu_guest {
+    u64 handle;
+    phys_addr_t p2n_phys;
+    u64 p2n_size;
+    int max_ints;
+    struct mutex recovery_lock;
+    int previous_state;
+};
+
+struct cxl_afu {
+    struct cxl_afu_native *native;
+    struct cxl_afu_guest *guest;
     irq_hw_number_t serr_hwirq;
-    char *err_irq_name;
-    char *psl_irq_name;
     unsigned int serr_virq;
-    void __iomem *p1n_mmio;
+    char *psl_irq_name;
+    char *err_irq_name;
     void __iomem *p2n_mmio;
     phys_addr_t psn_phys;
-    u64 pp_offset;
     u64 pp_size;
-    void __iomem *afu_desc_mmio;
+
     struct cxl *adapter;
     struct device dev;
     struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
@@ -363,26 +401,12 @@ struct cxl_afu {
     struct idr contexts_idr;
     struct dentry *debugfs;
     struct mutex contexts_lock;
-    struct mutex spa_mutex;
     spinlock_t afu_cntl_lock;
 
     /* AFU error buffer fields and bin attribute for sysfs */
     u64 eb_len, eb_offset;
     struct bin_attribute attr_eb;
 
-    /*
-     * Only the first part of the SPA is used for the process element
-     * linked list. The only other part that software needs to worry about
-     * is sw_command_status, which we store a separate pointer to.
-     * Everything else in the SPA is only used by hardware
-     */
-    struct cxl_process_element *spa;
-    __be64 *sw_command_status;
-    unsigned int spa_size;
-    int spa_order;
-    int spa_max_procs;
-    unsigned int psl_virq;
-
     /* pointer to the vphb */
     struct pci_controller *phb;
 
@@ -421,6 +445,12 @@ struct cxl_irq_name {
     char *name;
 };
 
+struct irq_avail {
+    irq_hw_number_t offset;
+    irq_hw_number_t range;
+    unsigned long *bitmap;
+};
+
 /*
  * This is a cxl context. If the PSL is in dedicated mode, there will be one
  * of these per AFU. If in AFU directed there can be lots of these.
@@ -476,7 +506,19 @@ struct cxl_context {
 
     struct cxl_process_element *elem;
 
-    int pe; /* process element handle */
+    /*
+     * pe is the process element handle, assigned by this driver when the
+     * context is initialized.
+     *
+     * external_pe is the PE shown outside of cxl.
+     * On bare-metal, pe=external_pe, because we decide what the handle is.
+     * In a guest, we only find out about the pe used by pHyp when the
+     * context is attached, and that's the value we want to report outside
+     * of cxl.
+     */
+    int pe;
+    int external_pe;
+
     u32 irq_count;
     bool pe_inserted;
     bool master;
@@ -488,11 +530,34 @@ struct cxl_context {
     struct rcu_head rcu;
 };
 
-struct cxl {
+struct cxl_native {
+    u64 afu_desc_off;
+    u64 afu_desc_size;
     void __iomem *p1_mmio;
     void __iomem *p2_mmio;
     irq_hw_number_t err_hwirq;
     unsigned int err_virq;
+    u64 ps_off;
+};
+
+struct cxl_guest {
+    struct platform_device *pdev;
+    int irq_nranges;
+    struct cdev cdev;
+    irq_hw_number_t irq_base_offset;
+    struct irq_avail *irq_avail;
+    spinlock_t irq_alloc_lock;
+    u64 handle;
+    char *status;
+    u16 vendor;
+    u16 device;
+    u16 subsystem_vendor;
+    u16 subsystem;
+};
+
+struct cxl {
+    struct cxl_native *native;
+    struct cxl_guest *guest;
     spinlock_t afu_list_lock;
     struct cxl_afu *afu[CXL_MAX_SLICES];
     struct device dev;
@@ -503,9 +568,6 @@ struct cxl {
     struct bin_attribute cxl_attr;
     int adapter_num;
     int user_irqs;
-    u64 afu_desc_off;
-    u64 afu_desc_size;
-    u64 ps_off;
     u64 ps_size;
     u16 psl_rev;
     u16 base_image;
@@ -519,13 +581,15 @@ struct cxl {
     bool perst_same_image;
 };
 
-int cxl_alloc_one_irq(struct cxl *adapter);
-void cxl_release_one_irq(struct cxl *adapter, int hwirq);
-int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
-void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
-int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
+int cxl_pci_alloc_one_irq(struct cxl *adapter);
+void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq);
+int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
+void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
+int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
 int cxl_update_image_control(struct cxl *adapter);
-int cxl_reset(struct cxl *adapter);
+int cxl_pci_reset(struct cxl *adapter);
+void cxl_pci_release_afu(struct device *dev);
+ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
 
 /* common == phyp + powernv */
 struct cxl_process_element_common {
@@ -555,29 +619,32 @@ struct cxl_process_element {
     __be32 software_state;
 } __packed;
 
-static inline bool cxl_adapter_link_ok(struct cxl *cxl)
+static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu)
 {
     struct pci_dev *pdev;
 
-    pdev = to_pci_dev(cxl->dev.parent);
-    return !pci_channel_offline(pdev);
+    if (cpu_has_feature(CPU_FTR_HVMODE)) {
+        pdev = to_pci_dev(cxl->dev.parent);
+        return !pci_channel_offline(pdev);
+    }
+    return true;
 }
 
 static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
 {
     WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
-    return cxl->p1_mmio + cxl_reg_off(reg);
+    return cxl->native->p1_mmio + cxl_reg_off(reg);
 }
 
 static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
 {
-    if (likely(cxl_adapter_link_ok(cxl)))
+    if (likely(cxl_adapter_link_ok(cxl, NULL)))
         out_be64(_cxl_p1_addr(cxl, reg), val);
 }
 
 static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
 {
-    if (likely(cxl_adapter_link_ok(cxl)))
+    if (likely(cxl_adapter_link_ok(cxl, NULL)))
         return in_be64(_cxl_p1_addr(cxl, reg));
     else
         return ~0ULL;
@@ -586,18 +653,18 @@ static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
 static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
 {
     WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
-    return afu->p1n_mmio + cxl_reg_off(reg);
+    return afu->native->p1n_mmio + cxl_reg_off(reg);
 }
 
 static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
 {
-    if (likely(cxl_adapter_link_ok(afu->adapter)))
+    if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
         out_be64(_cxl_p1n_addr(afu, reg), val);
 }
 
 static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
 {
-    if (likely(cxl_adapter_link_ok(afu->adapter)))
+    if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
         return in_be64(_cxl_p1n_addr(afu, reg));
     else
         return ~0ULL;
@@ -610,39 +677,19 @@ static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
 
 static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
 {
-    if (likely(cxl_adapter_link_ok(afu->adapter)))
+    if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
         out_be64(_cxl_p2n_addr(afu, reg), val);
 }
 
 static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
 {
-    if (likely(cxl_adapter_link_ok(afu->adapter)))
+    if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
         return in_be64(_cxl_p2n_addr(afu, reg));
     else
         return ~0ULL;
 }
 
-static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off)
-{
-    if (likely(cxl_adapter_link_ok(afu->adapter)))
-        return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset +
-                   ((cr) * (afu)->crs_len) + (off));
-    else
-        return ~0ULL;
-}
-
-static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off)
-{
-    if (likely(cxl_adapter_link_ok(afu->adapter)))
-        return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset +
-                   ((cr) * (afu)->crs_len) + (off));
-    else
-        return 0xffffffff;
-}
-u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
-u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
-
-ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
                 loff_t off, size_t count);
 
 
@@ -652,13 +699,14 @@ struct cxl_calls {
 };
 int register_cxl_calls(struct cxl_calls *calls);
 void unregister_cxl_calls(struct cxl_calls *calls);
+int cxl_update_properties(struct device_node *dn, struct property *new_prop);
 
-int cxl_alloc_adapter_nr(struct cxl *adapter);
 void cxl_remove_adapter_nr(struct cxl *adapter);
 
 int cxl_alloc_spa(struct cxl_afu *afu);
 void cxl_release_spa(struct cxl_afu *afu);
 
+dev_t cxl_get_dev(void);
 int cxl_file_init(void);
 void cxl_file_exit(void);
 int cxl_register_adapter(struct cxl *adapter);
@@ -679,21 +727,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu);
 int cxl_sysfs_afu_m_add(struct cxl_afu *afu);
 void cxl_sysfs_afu_m_remove(struct cxl_afu *afu);
 
-int cxl_afu_activate_mode(struct cxl_afu *afu, int mode);
-int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
-int cxl_afu_deactivate_mode(struct cxl_afu *afu);
+struct cxl *cxl_alloc_adapter(void);
+struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice);
 int cxl_afu_select_best_mode(struct cxl_afu *afu);
 
-int cxl_register_psl_irq(struct cxl_afu *afu);
-void cxl_release_psl_irq(struct cxl_afu *afu);
-int cxl_register_psl_err_irq(struct cxl *adapter);
-void cxl_release_psl_err_irq(struct cxl *adapter);
-int cxl_register_serr_irq(struct cxl_afu *afu);
-void cxl_release_serr_irq(struct cxl_afu *afu);
+int cxl_native_register_psl_irq(struct cxl_afu *afu);
+void cxl_native_release_psl_irq(struct cxl_afu *afu);
+int cxl_native_register_psl_err_irq(struct cxl *adapter);
+void cxl_native_release_psl_err_irq(struct cxl *adapter);
+int cxl_native_register_serr_irq(struct cxl_afu *afu);
+void cxl_native_release_serr_irq(struct cxl_afu *afu);
 int afu_register_irqs(struct cxl_context *ctx, u32 count);
 void afu_release_irqs(struct cxl_context *ctx, void *cookie);
 void afu_irq_name_free(struct cxl_context *ctx);
-irqreturn_t cxl_slice_irq_err(int irq, void *data);
 
 int cxl_debugfs_init(void);
 void cxl_debugfs_exit(void);
@@ -707,6 +753,7 @@ void cxl_prefault(struct cxl_context *ctx, u64 wed);
 
 struct cxl *get_cxl_adapter(int num);
 int cxl_alloc_sst(struct cxl_context *ctx);
+void cxl_dump_debug_buffer(void *addr, size_t size);
 
 void init_cxl_native(void);
 
@@ -720,40 +767,54 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
 void cxl_unmap_irq(unsigned int virq, void *cookie);
 int __detach_context(struct cxl_context *ctx);
 
-/* This matches the layout of the H_COLLECT_CA_INT_INFO retbuf */
+/*
+ * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined
+ * in PAPR.
+ * A word about endianness: a pointer to this structure is passed when
+ * calling the hcall. However, it is not a block of memory filled up by
+ * the hypervisor. The return values are found in registers, and copied
+ * one by one when returning from the hcall. See the end of the call to
+ * plpar_hcall9() in hvCall.S
+ * As a consequence:
+ * - we don't need to do any endianness conversion
+ * - the pid and tid are an exception. They are 32-bit values returned in
+ *   the same 64-bit register. So we do need to worry about byte ordering.
+ */
 struct cxl_irq_info {
     u64 dsisr;
     u64 dar;
     u64 dsr;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
     u32 pid;
     u32 tid;
+#else
+    u32 tid;
+    u32 pid;
+#endif
     u64 afu_err;
     u64 errstat;
-    u64 padding[3]; /* to match the expected retbuf size for plpar_hcall9 */
+    u64 proc_handle;
+    u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */
 };
 
 void cxl_assign_psn_space(struct cxl_context *ctx);
-int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
-               u64 amr);
-int cxl_detach_process(struct cxl_context *ctx);
-
-int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info);
-int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
+            void *cookie, irq_hw_number_t *dest_hwirq,
+            unsigned int *dest_virq, const char *name);
 
 int cxl_check_error(struct cxl_afu *afu);
 int cxl_afu_slbia(struct cxl_afu *afu);
 int cxl_tlb_slb_invalidate(struct cxl *adapter);
 int cxl_afu_disable(struct cxl_afu *afu);
-int __cxl_afu_reset(struct cxl_afu *afu);
-int cxl_afu_check_and_enable(struct cxl_afu *afu);
 int cxl_psl_purge(struct cxl_afu *afu);
 
 void cxl_stop_trace(struct cxl *cxl);
 int cxl_pci_vphb_add(struct cxl_afu *afu);
-void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
 void cxl_pci_vphb_remove(struct cxl_afu *afu);
 
 extern struct pci_driver cxl_pci_driver;
+extern struct platform_driver cxl_of_driver;
 int afu_allocate_irqs(struct cxl_context *ctx, u32 count);
 
 int afu_open(struct inode *inode, struct file *file);
@@ -764,4 +825,61 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
 ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
 extern const struct file_operations afu_fops;
 
+struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev);
+void cxl_guest_remove_adapter(struct cxl *adapter);
+int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np);
+int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np);
+ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
+ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len);
+int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np);
+void cxl_guest_remove_afu(struct cxl_afu *afu);
+int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np);
+int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np);
+int cxl_guest_add_chardev(struct cxl *adapter);
+void cxl_guest_remove_chardev(struct cxl *adapter);
+void cxl_guest_reload_module(struct cxl *adapter);
+int cxl_of_probe(struct platform_device *pdev);
+
+struct cxl_backend_ops {
+    struct module *module;
+    int (*adapter_reset)(struct cxl *adapter);
+    int (*alloc_one_irq)(struct cxl *adapter);
+    void (*release_one_irq)(struct cxl *adapter, int hwirq);
+    int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs,
+                struct cxl *adapter, unsigned int num);
+    void (*release_irq_ranges)(struct cxl_irq_ranges *irqs,
+                struct cxl *adapter);
+    int (*setup_irq)(struct cxl *adapter, unsigned int hwirq,
+            unsigned int virq);
+    irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx,
+                    u64 dsisr, u64 errstat);
+    irqreturn_t (*psl_interrupt)(int irq, void *data);
+    int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+    int (*attach_process)(struct cxl_context *ctx, bool kernel,
+            u64 wed, u64 amr);
+    int (*detach_process)(struct cxl_context *ctx);
+    bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
+    bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
+    void (*release_afu)(struct device *dev);
+    ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf,
+                loff_t off, size_t count);
+    int (*afu_check_and_enable)(struct cxl_afu *afu);
+    int (*afu_activate_mode)(struct cxl_afu *afu, int mode);
+    int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode);
+    int (*afu_reset)(struct cxl_afu *afu);
+    int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val);
+    int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val);
+    int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val);
+    int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val);
+    int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val);
+    int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val);
+    int (*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val);
+    ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count);
+};
+extern const struct cxl_backend_ops cxl_native_ops;
+extern const struct cxl_backend_ops cxl_guest_ops;
+extern const struct cxl_backend_ops *cxl_ops;
+
+/* check if the given pci_dev is on the the cxl vphb bus */
+bool cxl_pci_is_vphb_device(struct pci_dev *dev);
 #endif
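All call sites in this patch now dispatch through the cxl_ops pointer declared above. A minimal sketch of how the backend is presumably selected at init time, mirroring the CPU_FTR_HVMODE checks used throughout the patch (the actual assignment lives in main.c, whose hunks are not part of this section):

#include "cxl.h"

/* Sketch (assumption), not the real main.c code: hypervisor mode means
 * bare-metal PowerNV with direct MMIO access, otherwise we are a pHyp
 * guest and must go through hcalls. */
const struct cxl_backend_ops *cxl_ops;

static void __init cxl_select_backend(void)
{
    if (cpu_has_feature(CPU_FTR_HVMODE))
        cxl_ops = &cxl_native_ops;  /* native.c / pci.c */
    else
        cxl_ops = &cxl_guest_ops;   /* guest.c / hcalls.c */
}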
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 18df6f44af2a..5751899e0c17 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -118,6 +118,10 @@ void cxl_debugfs_afu_remove(struct cxl_afu *afu)
 int __init cxl_debugfs_init(void)
 {
     struct dentry *ent;
+
+    if (!cpu_has_feature(CPU_FTR_HVMODE))
+        return 0;
+
     ent = debugfs_create_dir("cxl", NULL);
     if (IS_ERR(ent))
         return PTR_ERR(ent);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 81c3f75b7330..9a8650bcb042 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -101,7 +101,7 @@ static void cxl_ack_ae(struct cxl_context *ctx)
 {
     unsigned long flags;
 
-    cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
+    cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
 
     spin_lock_irqsave(&ctx->lock, flags);
     ctx->pending_fault = true;
@@ -125,7 +125,7 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx,
     else {
 
         mb(); /* Order seg table write to TFC MMIO write */
-        cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
     }
 
     return IRQ_HANDLED;
@@ -163,7 +163,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
     local_irq_restore(flags);
 
     pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
-    cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+    cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
 }
 
 /*
@@ -254,14 +254,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
     u64 dar = ctx->dar;
     struct mm_struct *mm = NULL;
 
-    if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
-        cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
-        cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
-        /* Most likely explanation is harmless - a dedicated process
-         * has detached and these were cleared by the PSL purge, but
-         * warn about it just in case */
-        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
-        return;
+    if (cpu_has_feature(CPU_FTR_HVMODE)) {
+        if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
+            cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
+            cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
+            /* Most likely explanation is harmless - a dedicated
+             * process has detached and these were cleared by the
+             * PSL purge, but warn about it just in case
+             */
+            dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
+            return;
+        }
     }
 
     /* Early return if the context is being / has been detached */
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 783337d22f36..eec468f1612f 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -26,9 +26,7 @@
 #include "trace.h"
 
 #define CXL_NUM_MINORS 256 /* Total to reserve */
-#define CXL_DEV_MINORS 13   /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
 
-#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
 #define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
 #define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
 #define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
@@ -36,7 +34,6 @@
 #define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
 #define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
 
-#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
 #define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
 
 #define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
@@ -79,7 +76,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
     if (!afu->current_mode)
         goto err_put_afu;
 
-    if (!cxl_adapter_link_ok(adapter)) {
+    if (!cxl_ops->link_ok(adapter, afu)) {
         rc = -EIO;
         goto err_put_afu;
     }
@@ -210,8 +207,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
     trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
 
-    if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
-                     amr))) {
+    if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+                            amr))) {
         afu_release_irqs(ctx, ctx);
         goto out;
     }
@@ -222,12 +219,13 @@ out:
     mutex_unlock(&ctx->status_mutex);
     return rc;
 }
+
 static long afu_ioctl_process_element(struct cxl_context *ctx,
                       int __user *upe)
 {
     pr_devel("%s: pe: %i\n", __func__, ctx->pe);
 
-    if (copy_to_user(upe, &ctx->pe, sizeof(__u32)))
+    if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
         return -EFAULT;
 
     return 0;
@@ -259,7 +257,7 @@ long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
     if (ctx->status == CLOSED)
         return -EIO;
 
-    if (!cxl_adapter_link_ok(ctx->afu->adapter))
+    if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
         return -EIO;
 
     pr_devel("afu_ioctl\n");
@@ -289,7 +287,7 @@ int afu_mmap(struct file *file, struct vm_area_struct *vm)
     if (ctx->status != STARTED)
         return -EIO;
 
-    if (!cxl_adapter_link_ok(ctx->afu->adapter))
+    if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
         return -EIO;
 
     return cxl_context_iomap(ctx, vm);
@@ -336,7 +334,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
     int rc;
     DEFINE_WAIT(wait);
 
-    if (!cxl_adapter_link_ok(ctx->afu->adapter))
+    if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
         return -EIO;
 
     if (count < CXL_READ_MIN_SIZE)
@@ -349,7 +347,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
         if (ctx_event_pending(ctx))
             break;
 
-        if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
             rc = -EIO;
             goto out;
         }
@@ -445,7 +443,8 @@ static const struct file_operations afu_master_fops = {
 
 static char *cxl_devnode(struct device *dev, umode_t *mode)
 {
-    if (CXL_DEVT_IS_CARD(dev->devt)) {
+    if (cpu_has_feature(CPU_FTR_HVMODE) &&
+        CXL_DEVT_IS_CARD(dev->devt)) {
         /*
          * These minor numbers will eventually be used to program the
          * PSL and AFUs once we have dynamic reprogramming support
@@ -546,6 +545,11 @@ int cxl_register_adapter(struct cxl *adapter)
     return device_register(&adapter->dev);
 }
 
+dev_t cxl_get_dev(void)
+{
+    return cxl_dev;
+}
+
 int __init cxl_file_init(void)
 {
     int rc;
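For reference, the minor-number layout encoded by the macros that just moved to cxl.h: each adapter owns a block of CXL_DEV_MINORS = 13 minors, one card control node plus 4 AFUs times 3 modes (dedicated/master/shared), presumably shared with the new guest chardev code. A small self-contained check of the arithmetic; the test harness is illustrative, while the expressions mirror CXL_CARD_MINOR, CXL_AFU_MINOR_*, CXL_DEVT_ADAPTER and CXL_DEVT_AFU:

#include <assert.h>

#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */

int main(void)
{
    int adapter_num = 1, slice = 2;

    int card = adapter_num * CXL_DEV_MINORS; /* CXL_CARD_MINOR: 13 */
    int d    = card + 1 + 3 * slice;         /* CXL_AFU_MINOR_D: 20 */
    int m    = d + 1;                        /* CXL_AFU_MINOR_M: 21 */
    int s    = d + 2;                        /* CXL_AFU_MINOR_S: 22 */

    /* reverse mappings used by the file layer */
    assert(card % CXL_DEV_MINORS == 0);             /* CXL_DEVT_IS_CARD */
    assert(m / CXL_DEV_MINORS == adapter_num);      /* CXL_DEVT_ADAPTER */
    assert((d % CXL_DEV_MINORS - 1) / 3 == slice);  /* CXL_DEVT_AFU */
    assert((m % CXL_DEV_MINORS - 1) / 3 == slice);
    assert((s % CXL_DEV_MINORS - 1) / 3 == slice);
    return 0;
}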
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c new file mode 100644 index 000000000000..68dd0b7da471 --- /dev/null +++ b/drivers/misc/cxl/flash.c | |||
@@ -0,0 +1,538 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/fs.h> | ||
3 | #include <linux/semaphore.h> | ||
4 | #include <linux/slab.h> | ||
5 | #include <linux/uaccess.h> | ||
6 | #include <asm/rtas.h> | ||
7 | |||
8 | #include "cxl.h" | ||
9 | #include "hcalls.h" | ||
10 | |||
11 | #define DOWNLOAD_IMAGE 1 | ||
12 | #define VALIDATE_IMAGE 2 | ||
13 | |||
14 | struct ai_header { | ||
15 | u16 version; | ||
16 | u8 reserved0[6]; | ||
17 | u16 vendor; | ||
18 | u16 device; | ||
19 | u16 subsystem_vendor; | ||
20 | u16 subsystem; | ||
21 | u64 image_offset; | ||
22 | u64 image_length; | ||
23 | u8 reserved1[96]; | ||
24 | }; | ||
25 | |||
26 | static struct semaphore sem; | ||
27 | unsigned long *buffer[CXL_AI_MAX_ENTRIES]; | ||
28 | struct sg_list *le; | ||
29 | static u64 continue_token; | ||
30 | static unsigned int transfer; | ||
31 | |||
32 | struct update_props_workarea { | ||
33 | __be32 phandle; | ||
34 | __be32 state; | ||
35 | __be64 reserved; | ||
36 | __be32 nprops; | ||
37 | } __packed; | ||
38 | |||
39 | struct update_nodes_workarea { | ||
40 | __be32 state; | ||
41 | __be64 unit_address; | ||
42 | __be32 reserved; | ||
43 | } __packed; | ||
44 | |||
45 | #define DEVICE_SCOPE 3 | ||
46 | #define NODE_ACTION_MASK 0xff000000 | ||
47 | #define NODE_COUNT_MASK 0x00ffffff | ||
48 | #define OPCODE_DELETE 0x01000000 | ||
49 | #define OPCODE_UPDATE 0x02000000 | ||
50 | #define OPCODE_ADD 0x03000000 | ||
51 | |||
52 | static int rcall(int token, char *buf, s32 scope) | ||
53 | { | ||
54 | int rc; | ||
55 | |||
56 | spin_lock(&rtas_data_buf_lock); | ||
57 | |||
58 | memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE); | ||
59 | rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope); | ||
60 | memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); | ||
61 | |||
62 | spin_unlock(&rtas_data_buf_lock); | ||
63 | return rc; | ||
64 | } | ||
65 | |||
66 | static int update_property(struct device_node *dn, const char *name, | ||
67 | u32 vd, char *value) | ||
68 | { | ||
69 | struct property *new_prop; | ||
70 | u32 *val; | ||
71 | int rc; | ||
72 | |||
73 | new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); | ||
74 | if (!new_prop) | ||
75 | return -ENOMEM; | ||
76 | |||
77 | new_prop->name = kstrdup(name, GFP_KERNEL); | ||
78 | if (!new_prop->name) { | ||
79 | kfree(new_prop); | ||
80 | return -ENOMEM; | ||
81 | } | ||
82 | |||
83 | new_prop->length = vd; | ||
84 | new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); | ||
85 | if (!new_prop->value) { | ||
86 | kfree(new_prop->name); | ||
87 | kfree(new_prop); | ||
88 | return -ENOMEM; | ||
89 | } | ||
90 | memcpy(new_prop->value, value, vd); | ||
91 | |||
92 | val = (u32 *)new_prop->value; | ||
93 | rc = cxl_update_properties(dn, new_prop); | ||
94 | pr_devel("%s: update property (%s, length: %i, value: %#x)\n", | ||
95 | dn->name, name, vd, be32_to_cpu(*val)); | ||
96 | |||
97 | if (rc) { | ||
98 | kfree(new_prop->name); | ||
99 | kfree(new_prop->value); | ||
100 | kfree(new_prop); | ||
101 | } | ||
102 | return rc; | ||
103 | } | ||
104 | |||
105 | static int update_node(__be32 phandle, s32 scope) | ||
106 | { | ||
107 | struct update_props_workarea *upwa; | ||
108 | struct device_node *dn; | ||
109 | int i, rc, ret; | ||
110 | char *prop_data; | ||
111 | char *buf; | ||
112 | int token; | ||
113 | u32 nprops; | ||
114 | u32 vd; | ||
115 | |||
116 | token = rtas_token("ibm,update-properties"); | ||
117 | if (token == RTAS_UNKNOWN_SERVICE) | ||
118 | return -EINVAL; | ||
119 | |||
120 | buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); | ||
121 | if (!buf) | ||
122 | return -ENOMEM; | ||
123 | |||
124 | dn = of_find_node_by_phandle(be32_to_cpu(phandle)); | ||
125 | if (!dn) { | ||
126 | kfree(buf); | ||
127 | return -ENOENT; | ||
128 | } | ||
129 | |||
130 | upwa = (struct update_props_workarea *)&buf[0]; | ||
131 | upwa->phandle = phandle; | ||
132 | do { | ||
133 | rc = rcall(token, buf, scope); | ||
134 | if (rc < 0) | ||
135 | break; | ||
136 | |||
137 | prop_data = buf + sizeof(*upwa); | ||
138 | nprops = be32_to_cpu(upwa->nprops); | ||
139 | |||
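| /* | ||
| * A leading empty property name marks the node-path descriptor | ||
| * that ibm,update-properties returns first; skip its value rather | ||
| * than applying it (same handling as the pseries mobility code). | ||
| */ | ||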
140 | if (*prop_data == 0) { | ||
141 | prop_data++; | ||
142 | vd = be32_to_cpu(*(__be32 *)prop_data); | ||
143 | prop_data += vd + sizeof(vd); | ||
144 | nprops--; | ||
145 | } | ||
146 | |||
147 | for (i = 0; i < nprops; i++) { | ||
148 | char *prop_name; | ||
149 | |||
150 | prop_name = prop_data; | ||
151 | prop_data += strlen(prop_name) + 1; | ||
152 | vd = be32_to_cpu(*(__be32 *)prop_data); | ||
153 | prop_data += sizeof(vd); | ||
154 | |||
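| /* | ||
| * vd == 0 is a name-only property and vd == 0x80000000 flags a | ||
| * deleted one; only real values are applied here (same encoding | ||
| * as in the pseries mobility code). | ||
| */ | ||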
155 | if ((vd != 0x00000000) && (vd != 0x80000000)) { | ||
156 | ret = update_property(dn, prop_name, vd, | ||
157 | prop_data); | ||
158 | if (ret) | ||
159 | pr_err("cxl: Could not update property %s - %i\n", | ||
160 | prop_name, ret); | ||
161 | |||
162 | prop_data += vd; | ||
163 | } | ||
164 | } | ||
165 | } while (rc == 1); | ||
166 | |||
167 | of_node_put(dn); | ||
168 | kfree(buf); | ||
169 | return rc; | ||
170 | } | ||
171 | |||
172 | static int update_devicetree(struct cxl *adapter, s32 scope) | ||
173 | { | ||
174 | struct update_nodes_workarea *unwa; | ||
175 | u32 action, node_count; | ||
176 | int token, rc, i; | ||
177 | __be32 *data, drc_index, phandle; | ||
178 | char *buf; | ||
179 | |||
180 | token = rtas_token("ibm,update-nodes"); | ||
181 | if (token == RTAS_UNKNOWN_SERVICE) | ||
182 | return -EINVAL; | ||
183 | |||
184 | buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); | ||
185 | if (!buf) | ||
186 | return -ENOMEM; | ||
187 | |||
188 | unwa = (struct update_nodes_workarea *)&buf[0]; | ||
189 | unwa->unit_address = cpu_to_be64(adapter->guest->handle); | ||
190 | do { | ||
191 | rc = rcall(token, buf, scope); | ||
192 | if (rc && rc != 1) | ||
193 | break; | ||
194 | |||
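| /* skip the 16-byte update_nodes_workarea header (four be32 cells) */ | ||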
195 | data = (__be32 *)buf + 4; | ||
196 | while (be32_to_cpu(*data) & NODE_ACTION_MASK) { | ||
197 | action = be32_to_cpu(*data) & NODE_ACTION_MASK; | ||
198 | node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; | ||
199 | pr_devel("device reconfiguration - action: %#x, nodes: %#x\n", | ||
200 | action, node_count); | ||
201 | data++; | ||
202 | |||
203 | for (i = 0; i < node_count; i++) { | ||
204 | phandle = *data++; | ||
205 | |||
206 | switch (action) { | ||
207 | case OPCODE_DELETE: | ||
208 | /* nothing to do */ | ||
209 | break; | ||
210 | case OPCODE_UPDATE: | ||
211 | update_node(phandle, scope); | ||
212 | break; | ||
213 | case OPCODE_ADD: | ||
214 | /* nothing to do, just move pointer */ | ||
215 | drc_index = *data++; | ||
216 | break; | ||
217 | } | ||
218 | } | ||
219 | } | ||
220 | } while (rc == 1); | ||
221 | |||
222 | kfree(buf); | ||
223 | return 0; | ||
224 | } | ||
225 | |||
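| /* | ||
| * Split the (optionally header-prefixed) user image into page-sized | ||
| * chunks, describe them in the pre-allocated scatter/gather list, | ||
| * and hand the list to 'fct' - the download or validate hcall - | ||
| * which may need several invocations chained via continue_token. | ||
| */ | ||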
226 | static int handle_image(struct cxl *adapter, int operation, | ||
227 | long (*fct)(u64, u64, u64, u64 *), | ||
228 | struct cxl_adapter_image *ai) | ||
229 | { | ||
230 | size_t mod, s_copy, len_chunk = 0; | ||
231 | struct ai_header *header = NULL; | ||
232 | unsigned int entries = 0, i; | ||
233 | void *dest, *from; | ||
234 | int rc = 0, need_header; | ||
235 | |||
236 | /* base adapter image header */ | ||
237 | need_header = (ai->flags & CXL_AI_NEED_HEADER); | ||
238 | if (need_header) { | ||
239 | header = kzalloc(sizeof(struct ai_header), GFP_KERNEL); | ||
240 | if (!header) | ||
241 | return -ENOMEM; | ||
242 | header->version = cpu_to_be16(1); | ||
243 | header->vendor = cpu_to_be16(adapter->guest->vendor); | ||
244 | header->device = cpu_to_be16(adapter->guest->device); | ||
245 | header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor); | ||
246 | header->subsystem = cpu_to_be16(adapter->guest->subsystem); | ||
247 | header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE); | ||
248 | header->image_length = cpu_to_be64(ai->len_image); | ||
249 | } | ||
250 | |||
251 | /* number of entries in the list */ | ||
252 | len_chunk = ai->len_data; | ||
253 | if (need_header) | ||
254 | len_chunk += CXL_AI_HEADER_SIZE; | ||
255 | |||
256 | entries = len_chunk / CXL_AI_BUFFER_SIZE; | ||
257 | mod = len_chunk % CXL_AI_BUFFER_SIZE; | ||
258 | if (mod) | ||
259 | entries++; | ||
260 | |||
261 | if (entries > CXL_AI_MAX_ENTRIES) { | ||
262 | rc = -EINVAL; | ||
263 | goto err; | ||
264 | } | ||
265 | |||
266 | /* <-- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes --> | ||
267 | * chunk 0 ---------------------------------------------------- | ||
268 | * | header | data | | ||
269 | * ---------------------------------------------------- | ||
270 | * chunk 1 ---------------------------------------------------- | ||
271 | * | data | | ||
272 | * ---------------------------------------------------- | ||
273 | * .... | ||
274 | * chunk n ---------------------------------------------------- | ||
275 | * | data | | ||
276 | * ---------------------------------------------------- | ||
277 | */ | ||
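| /* | ||
| * Worked example: with the 128-byte header and ai->len_data = 5000, | ||
| * len_chunk = 5128, so entries = 2 and mod = 1032; chunk 0 carries | ||
| * the header plus 3968 bytes of data, chunk 1 the remaining 1032 | ||
| * bytes, and le[1].len is trimmed down to mod. | ||
| */ | ||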
278 | from = (void *) ai->data; | ||
279 | for (i = 0; i < entries; i++) { | ||
280 | dest = buffer[i]; | ||
281 | s_copy = CXL_AI_BUFFER_SIZE; | ||
282 | |||
283 | if ((need_header) && (i == 0)) { | ||
284 | /* add adapter image header */ | ||
285 | memcpy(buffer[i], header, sizeof(struct ai_header)); | ||
286 | s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE; | ||
287 | dest += CXL_AI_HEADER_SIZE; /* image offset */ | ||
288 | } | ||
289 | if ((i == (entries - 1)) && mod) | ||
290 | s_copy = mod; | ||
291 | |||
292 | /* copy data */ | ||
293 | if (copy_from_user(dest, from, s_copy)) { | ||
| rc = -EFAULT; | ||
294 | goto err; | ||
| } | ||
295 | |||
296 | /* fill in the list */ | ||
297 | le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i])); | ||
298 | le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE); | ||
299 | if ((i == (entries - 1)) && mod) | ||
300 | le[i].len = cpu_to_be64(mod); | ||
301 | from += s_copy; | ||
302 | } | ||
303 | pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n", | ||
304 | __func__, operation, need_header, entries, continue_token); | ||
305 | |||
306 | /* | ||
307 | * download/validate the adapter image to the coherent | ||
308 | * platform facility | ||
309 | */ | ||
310 | rc = fct(adapter->guest->handle, virt_to_phys(le), entries, | ||
311 | &continue_token); | ||
312 | if (rc == 0) /* success of download/validation operation */ | ||
313 | continue_token = 0; | ||
314 | |||
315 | err: | ||
316 | kfree(header); | ||
317 | |||
318 | return rc; | ||
319 | } | ||
320 | |||
321 | static int transfer_image(struct cxl *adapter, int operation, | ||
322 | struct cxl_adapter_image *ai) | ||
323 | { | ||
324 | int rc = 0; | ||
325 | int afu; | ||
326 | |||
327 | switch (operation) { | ||
328 | case DOWNLOAD_IMAGE: | ||
329 | rc = handle_image(adapter, operation, | ||
330 | &cxl_h_download_adapter_image, ai); | ||
331 | if (rc < 0) { | ||
332 | pr_devel("resetting adapter\n"); | ||
333 | cxl_h_reset_adapter(adapter->guest->handle); | ||
334 | } | ||
335 | return rc; | ||
336 | |||
337 | case VALIDATE_IMAGE: | ||
338 | rc = handle_image(adapter, operation, | ||
339 | &cxl_h_validate_adapter_image, ai); | ||
340 | if (rc < 0) { | ||
341 | pr_devel("resetting adapter\n"); | ||
342 | cxl_h_reset_adapter(adapter->guest->handle); | ||
343 | return rc; | ||
344 | } | ||
345 | if (rc == 0) { | ||
346 | pr_devel("remove current afu\n"); | ||
347 | for (afu = 0; afu < adapter->slices; afu++) | ||
348 | cxl_guest_remove_afu(adapter->afu[afu]); | ||
349 | |||
350 | pr_devel("resetting adapter\n"); | ||
351 | cxl_h_reset_adapter(adapter->guest->handle); | ||
352 | |||
353 | /* The entire image has now been | ||
354 | * downloaded and the validation has | ||
355 | * been performed successfully. | ||
356 | * The partition should now call | ||
357 | * ibm,update-nodes and | ||
358 | * ibm,update-properties to receive | ||
359 | * the current configuration. | ||
360 | */ | ||
361 | rc = update_devicetree(adapter, DEVICE_SCOPE); | ||
362 | transfer = 1; | ||
363 | } | ||
364 | return rc; | ||
365 | } | ||
366 | |||
367 | return -EINVAL; | ||
368 | } | ||
369 | |||
370 | static long ioctl_transfer_image(struct cxl *adapter, int operation, | ||
371 | struct cxl_adapter_image __user *uai) | ||
372 | { | ||
373 | struct cxl_adapter_image ai; | ||
374 | |||
375 | pr_devel("%s\n", __func__); | ||
376 | |||
377 | if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image))) | ||
378 | return -EFAULT; | ||
379 | |||
380 | /* | ||
381 | * Make sure reserved fields and bits are set to 0 | ||
382 | */ | ||
383 | if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 || | ||
384 | (ai.flags & ~CXL_AI_ALL)) | ||
385 | return -EINVAL; | ||
386 | |||
387 | return transfer_image(adapter, operation, &ai); | ||
388 | } | ||
389 | |||
390 | static int device_open(struct inode *inode, struct file *file) | ||
391 | { | ||
392 | int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev); | ||
393 | struct cxl *adapter; | ||
394 | int rc = 0, i; | ||
395 | |||
396 | pr_devel("in %s\n", __func__); | ||
397 | |||
398 | BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE); | ||
399 | |||
400 | /* Allow only one process at a time to open the device, using a semaphore */ | ||
401 | if (down_interruptible(&sem) != 0) | ||
402 | return -EPERM; | ||
403 | |||
404 | if (!(adapter = get_cxl_adapter(adapter_num))) { | ||
| rc = -ENODEV; | ||
405 | goto err_unlock; | ||
| } | ||
406 | |||
407 | file->private_data = adapter; | ||
408 | continue_token = 0; | ||
409 | transfer = 0; | ||
410 | |||
411 | for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) | ||
412 | buffer[i] = NULL; | ||
413 | |||
414 | /* aligned buffer containing list entries which describe up to | ||
415 | * 1 megabyte of data (256 entries of 4096 bytes each) | ||
416 | * Logical real address of buffer 0 - Buffer 0 length in bytes | ||
417 | * Logical real address of buffer 1 - Buffer 1 length in bytes | ||
418 | * Logical real address of buffer 2 - Buffer 2 length in bytes | ||
419 | * .... | ||
420 | * .... | ||
421 | * Logical real address of buffer N - Buffer N length in bytes | ||
422 | */ | ||
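| /* | ||
| * The list and the data buffers are handed to the hypervisor by | ||
| * physical address, so each must be a real, contiguous page - hence | ||
| * get_zeroed_page(). Each entry being a physical address plus a | ||
| * length, 256 of them fit in the single list page. | ||
| */ | ||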
423 | le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); | ||
424 | if (!le) { | ||
425 | rc = -ENOMEM; | ||
426 | goto err; | ||
427 | } | ||
428 | |||
429 | for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { | ||
430 | buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); | ||
431 | if (!buffer[i]) { | ||
432 | rc = -ENOMEM; | ||
433 | goto err1; | ||
434 | } | ||
435 | } | ||
436 | |||
437 | return 0; | ||
438 | |||
439 | err1: | ||
440 | for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { | ||
441 | if (buffer[i]) | ||
442 | free_page((unsigned long) buffer[i]); | ||
443 | } | ||
444 | |||
445 | if (le) | ||
446 | free_page((unsigned long) le); | ||
447 | err: | ||
448 | put_device(&adapter->dev); | ||
| err_unlock: | ||
| up(&sem); | ||
449 | |||
450 | return rc; | ||
451 | } | ||
452 | |||
453 | static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
454 | { | ||
455 | struct cxl *adapter = file->private_data; | ||
456 | |||
457 | pr_devel("in %s\n", __func__); | ||
458 | |||
459 | if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE) | ||
460 | return ioctl_transfer_image(adapter, | ||
461 | DOWNLOAD_IMAGE, | ||
462 | (struct cxl_adapter_image __user *)arg); | ||
463 | else if (cmd == CXL_IOCTL_VALIDATE_IMAGE) | ||
464 | return ioctl_transfer_image(adapter, | ||
465 | VALIDATE_IMAGE, | ||
466 | (struct cxl_adapter_image __user *)arg); | ||
467 | else | ||
468 | return -EINVAL; | ||
469 | } | ||
470 | |||
471 | static long device_compat_ioctl(struct file *file, unsigned int cmd, | ||
472 | unsigned long arg) | ||
473 | { | ||
474 | return device_ioctl(file, cmd, arg); | ||
475 | } | ||
476 | |||
477 | static int device_close(struct inode *inode, struct file *file) | ||
478 | { | ||
479 | struct cxl *adapter = file->private_data; | ||
480 | int i; | ||
481 | |||
482 | pr_devel("in %s\n", __func__); | ||
483 | |||
484 | for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) { | ||
485 | if (buffer[i]) | ||
486 | free_page((unsigned long) buffer[i]); | ||
487 | } | ||
488 | |||
489 | if (le) | ||
490 | free_page((unsigned long) le); | ||
491 | |||
492 | up(&sem); | ||
493 | put_device(&adapter->dev); | ||
494 | continue_token = 0; | ||
495 | |||
496 | /* reload the module */ | ||
497 | if (transfer) | ||
498 | cxl_guest_reload_module(adapter); | ||
499 | else { | ||
500 | pr_devel("resetting adapter\n"); | ||
501 | cxl_h_reset_adapter(adapter->guest->handle); | ||
502 | } | ||
503 | |||
504 | transfer = 0; | ||
505 | return 0; | ||
506 | } | ||
507 | |||
508 | static const struct file_operations fops = { | ||
509 | .owner = THIS_MODULE, | ||
510 | .open = device_open, | ||
511 | .unlocked_ioctl = device_ioctl, | ||
512 | .compat_ioctl = device_compat_ioctl, | ||
513 | .release = device_close, | ||
514 | }; | ||
515 | |||
516 | void cxl_guest_remove_chardev(struct cxl *adapter) | ||
517 | { | ||
518 | cdev_del(&adapter->guest->cdev); | ||
519 | } | ||
520 | |||
521 | int cxl_guest_add_chardev(struct cxl *adapter) | ||
522 | { | ||
523 | dev_t devt; | ||
524 | int rc; | ||
525 | |||
526 | devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter)); | ||
527 | cdev_init(&adapter->guest->cdev, &fops); | ||
528 | if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) { | ||
529 | dev_err(&adapter->dev, | ||
530 | "Unable to add chardev on adapter (card%i): %i\n", | ||
531 | adapter->adapter_num, rc); | ||
532 | goto err; | ||
533 | } | ||
534 | adapter->dev.devt = devt; | ||
535 | sema_init(&sem, 1); | ||
536 | err: | ||
537 | return rc; | ||
538 | } | ||
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c new file mode 100644 index 000000000000..8213372de2b7 --- /dev/null +++ b/drivers/misc/cxl/guest.c | |||
@@ -0,0 +1,1177 @@ | |||
1 | /* | ||
2 | * Copyright 2015 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/delay.h> | ||
13 | |||
14 | #include "cxl.h" | ||
15 | #include "hcalls.h" | ||
16 | #include "trace.h" | ||
17 | |||
18 | #define CXL_ERROR_DETECTED_EVENT 1 | ||
19 | #define CXL_SLOT_RESET_EVENT 2 | ||
20 | #define CXL_RESUME_EVENT 3 | ||
21 | |||
22 | static void pci_error_handlers(struct cxl_afu *afu, | ||
23 | int bus_error_event, | ||
24 | pci_channel_state_t state) | ||
25 | { | ||
26 | struct pci_dev *afu_dev; | ||
27 | |||
28 | if (afu->phb == NULL) | ||
29 | return; | ||
30 | |||
31 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { | ||
32 | if (!afu_dev->driver) | ||
33 | continue; | ||
34 | |||
35 | switch (bus_error_event) { | ||
36 | case CXL_ERROR_DETECTED_EVENT: | ||
37 | afu_dev->error_state = state; | ||
38 | |||
39 | if (afu_dev->driver->err_handler && | ||
40 | afu_dev->driver->err_handler->error_detected) | ||
41 | afu_dev->driver->err_handler->error_detected(afu_dev, state); | ||
42 | break; | ||
43 | case CXL_SLOT_RESET_EVENT: | ||
44 | afu_dev->error_state = state; | ||
45 | |||
46 | if (afu_dev->driver->err_handler && | ||
47 | afu_dev->driver->err_handler->slot_reset) | ||
48 | afu_dev->driver->err_handler->slot_reset(afu_dev); | ||
49 | break; | ||
50 | case CXL_RESUME_EVENT: | ||
51 | if (afu_dev->driver->err_handler && | ||
52 | afu_dev->driver->err_handler->resume) | ||
53 | afu_dev->driver->err_handler->resume(afu_dev); | ||
54 | break; | ||
55 | } | ||
56 | } | ||
57 | } | ||
58 | |||
59 | static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, | ||
60 | u64 errstat) | ||
61 | { | ||
62 | pr_devel("in %s\n", __func__); | ||
63 | dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); | ||
64 | |||
65 | return cxl_ops->ack_irq(ctx, 0, errstat); | ||
66 | } | ||
67 | |||
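| /* | ||
| * Collect VPD for the adapter or for an AFU (whichever argument is | ||
| * non-NULL): build a scatter/gather list of zeroed pages, let the | ||
| * hypervisor fill it, then copy out at most 'out' bytes - the | ||
| * amount of VPD actually available - into 'buf'. | ||
| */ | ||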
68 | static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, | ||
69 | void *buf, size_t len) | ||
70 | { | ||
71 | unsigned int entries, mod; | ||
72 | unsigned long **vpd_buf = NULL; | ||
73 | struct sg_list *le; | ||
74 | int rc = 0, i, tocopy; | ||
75 | u64 out = 0; | ||
76 | |||
77 | if (buf == NULL) | ||
78 | return -EINVAL; | ||
79 | |||
80 | /* number of entries in the list */ | ||
81 | entries = len / SG_BUFFER_SIZE; | ||
82 | mod = len % SG_BUFFER_SIZE; | ||
83 | if (mod) | ||
84 | entries++; | ||
85 | |||
86 | if (entries > SG_MAX_ENTRIES) { | ||
87 | entries = SG_MAX_ENTRIES; | ||
88 | len = SG_MAX_ENTRIES * SG_BUFFER_SIZE; | ||
89 | mod = 0; | ||
90 | } | ||
91 | |||
92 | vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL); | ||
93 | if (!vpd_buf) | ||
94 | return -ENOMEM; | ||
95 | |||
96 | le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); | ||
97 | if (!le) { | ||
98 | rc = -ENOMEM; | ||
99 | goto err1; | ||
100 | } | ||
101 | |||
102 | for (i = 0; i < entries; i++) { | ||
103 | vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); | ||
104 | if (!vpd_buf[i]) { | ||
105 | rc = -ENOMEM; | ||
106 | goto err2; | ||
107 | } | ||
108 | le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i])); | ||
109 | le[i].len = cpu_to_be64(SG_BUFFER_SIZE); | ||
110 | if ((i == (entries - 1)) && mod) | ||
111 | le[i].len = cpu_to_be64(mod); | ||
112 | } | ||
113 | |||
114 | if (adapter) | ||
115 | rc = cxl_h_collect_vpd_adapter(adapter->guest->handle, | ||
116 | virt_to_phys(le), entries, &out); | ||
117 | else | ||
118 | rc = cxl_h_collect_vpd(afu->guest->handle, 0, | ||
119 | virt_to_phys(le), entries, &out); | ||
120 | pr_devel("length of available VPD (entries: %i): %#llx\n", | ||
121 | entries, out); | ||
122 | |||
123 | if (!rc) { | ||
124 | /* | ||
125 | * hcall returns in 'out' the size of available VPDs. | ||
126 | * It fills the buffer with as much data as possible. | ||
127 | */ | ||
128 | if (out < len) | ||
129 | len = out; | ||
130 | rc = len; | ||
131 | if (out) { | ||
132 | for (i = 0; i < entries; i++) { | ||
133 | if (len < SG_BUFFER_SIZE) | ||
134 | tocopy = len; | ||
135 | else | ||
136 | tocopy = SG_BUFFER_SIZE; | ||
137 | memcpy(buf, vpd_buf[i], tocopy); | ||
138 | buf += tocopy; | ||
139 | len -= tocopy; | ||
140 | } | ||
141 | } | ||
142 | } | ||
143 | err2: | ||
144 | for (i = 0; i < entries; i++) { | ||
145 | if (vpd_buf[i]) | ||
146 | free_page((unsigned long) vpd_buf[i]); | ||
147 | } | ||
148 | free_page((unsigned long) le); | ||
149 | err1: | ||
150 | kfree(vpd_buf); | ||
151 | return rc; | ||
152 | } | ||
153 | |||
154 | static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info) | ||
155 | { | ||
156 | return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); | ||
157 | } | ||
158 | |||
159 | static irqreturn_t guest_psl_irq(int irq, void *data) | ||
160 | { | ||
161 | struct cxl_context *ctx = data; | ||
162 | struct cxl_irq_info irq_info; | ||
163 | int rc; | ||
164 | |||
165 | pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq); | ||
166 | rc = guest_get_irq_info(ctx, &irq_info); | ||
167 | if (rc) { | ||
168 | WARN(1, "Unable to get IRQ info: %i\n", rc); | ||
169 | return IRQ_HANDLED; | ||
170 | } | ||
171 | |||
172 | rc = cxl_irq(irq, ctx, &irq_info); | ||
173 | return rc; | ||
174 | } | ||
175 | |||
176 | static int afu_read_error_state(struct cxl_afu *afu, int *state_out) | ||
177 | { | ||
178 | u64 state; | ||
179 | int rc = 0; | ||
180 | |||
181 | rc = cxl_h_read_error_state(afu->guest->handle, &state); | ||
182 | if (!rc) { | ||
183 | WARN_ON(state != H_STATE_NORMAL && | ||
184 | state != H_STATE_DISABLE && | ||
185 | state != H_STATE_TEMP_UNAVAILABLE && | ||
186 | state != H_STATE_PERM_UNAVAILABLE); | ||
187 | *state_out = state & 0xffffffff; | ||
188 | } | ||
189 | return rc; | ||
190 | } | ||
191 | |||
192 | static irqreturn_t guest_slice_irq_err(int irq, void *data) | ||
193 | { | ||
194 | struct cxl_afu *afu = data; | ||
195 | int rc; | ||
196 | u64 serr; | ||
197 | |||
198 | WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); | ||
199 | rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); | ||
200 | if (rc) { | ||
201 | dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); | ||
202 | return IRQ_HANDLED; | ||
203 | } | ||
204 | dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr); | ||
205 | |||
206 | rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); | ||
207 | if (rc) | ||
208 | dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", | ||
209 | rc); | ||
210 | |||
211 | return IRQ_HANDLED; | ||
212 | } | ||
213 | |||
214 | |||
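| /* | ||
| * Find 'len' consecutive free interrupts in one of the adapter's | ||
| * available ranges, mark them allocated in that range's bitmap and | ||
| * return the first hwirq number through *irq. | ||
| */ | ||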
215 | static int irq_alloc_range(struct cxl *adapter, int len, int *irq) | ||
216 | { | ||
217 | int i, n; | ||
218 | struct irq_avail *cur; | ||
219 | |||
220 | for (i = 0; i < adapter->guest->irq_nranges; i++) { | ||
221 | cur = &adapter->guest->irq_avail[i]; | ||
222 | n = bitmap_find_next_zero_area(cur->bitmap, cur->range, | ||
223 | 0, len, 0); | ||
224 | if (n < cur->range) { | ||
225 | bitmap_set(cur->bitmap, n, len); | ||
226 | *irq = cur->offset + n; | ||
227 | pr_devel("guest: allocate IRQs %#x->%#x\n", | ||
228 | *irq, *irq + len - 1); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | } | ||
233 | return -ENOSPC; | ||
234 | } | ||
235 | |||
236 | static int irq_free_range(struct cxl *adapter, int irq, int len) | ||
237 | { | ||
238 | int i, n; | ||
239 | struct irq_avail *cur; | ||
240 | |||
241 | if (len == 0) | ||
242 | return -ENOENT; | ||
243 | |||
244 | for (i = 0; i < adapter->guest->irq_nranges; i++) { | ||
245 | cur = &adapter->guest->irq_avail[i]; | ||
246 | if (irq >= cur->offset && | ||
247 | (irq + len) <= (cur->offset + cur->range)) { | ||
248 | n = irq - cur->offset; | ||
249 | bitmap_clear(cur->bitmap, n, len); | ||
250 | pr_devel("guest: release IRQs %#x->%#x\n", | ||
251 | irq, irq + len - 1); | ||
252 | return 0; | ||
253 | } | ||
254 | } | ||
255 | return -ENOENT; | ||
256 | } | ||
257 | |||
258 | static int guest_reset(struct cxl *adapter) | ||
259 | { | ||
260 | struct cxl_afu *afu = NULL; | ||
261 | int i, rc; | ||
262 | |||
263 | pr_devel("Adapter reset request\n"); | ||
264 | for (i = 0; i < adapter->slices; i++) { | ||
265 | if ((afu = adapter->afu[i])) { | ||
266 | pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, | ||
267 | pci_channel_io_frozen); | ||
268 | cxl_context_detach_all(afu); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | rc = cxl_h_reset_adapter(adapter->guest->handle); | ||
273 | for (i = 0; i < adapter->slices; i++) { | ||
274 | if (!rc && (afu = adapter->afu[i])) { | ||
275 | pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, | ||
276 | pci_channel_io_normal); | ||
277 | pci_error_handlers(afu, CXL_RESUME_EVENT, 0); | ||
278 | } | ||
279 | } | ||
280 | return rc; | ||
281 | } | ||
282 | |||
283 | static int guest_alloc_one_irq(struct cxl *adapter) | ||
284 | { | ||
285 | int irq; | ||
286 | |||
287 | spin_lock(&adapter->guest->irq_alloc_lock); | ||
288 | if (irq_alloc_range(adapter, 1, &irq)) | ||
289 | irq = -ENOSPC; | ||
290 | spin_unlock(&adapter->guest->irq_alloc_lock); | ||
291 | return irq; | ||
292 | } | ||
293 | |||
294 | static void guest_release_one_irq(struct cxl *adapter, int irq) | ||
295 | { | ||
296 | spin_lock(&adapter->guest->irq_alloc_lock); | ||
297 | irq_free_range(adapter, irq, 1); | ||
298 | spin_unlock(&adapter->guest->irq_alloc_lock); | ||
299 | } | ||
300 | |||
301 | static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs, | ||
302 | struct cxl *adapter, unsigned int num) | ||
303 | { | ||
304 | int i, try, irq; | ||
305 | |||
306 | memset(irqs, 0, sizeof(struct cxl_irq_ranges)); | ||
307 | |||
308 | spin_lock(&adapter->guest->irq_alloc_lock); | ||
309 | for (i = 0; i < CXL_IRQ_RANGES && num; i++) { | ||
310 | try = num; | ||
311 | while (try) { | ||
312 | if (irq_alloc_range(adapter, try, &irq) == 0) | ||
313 | break; | ||
314 | try /= 2; | ||
315 | } | ||
316 | if (!try) | ||
317 | goto error; | ||
318 | irqs->offset[i] = irq; | ||
319 | irqs->range[i] = try; | ||
320 | num -= try; | ||
321 | } | ||
322 | if (num) | ||
323 | goto error; | ||
324 | spin_unlock(&adapter->guest->irq_alloc_lock); | ||
325 | return 0; | ||
326 | |||
327 | error: | ||
328 | for (i = 0; i < CXL_IRQ_RANGES; i++) | ||
329 | irq_free_range(adapter, irqs->offset[i], irqs->range[i]); | ||
330 | spin_unlock(&adapter->guest->irq_alloc_lock); | ||
331 | return -ENOSPC; | ||
332 | } | ||
333 | |||
334 | static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs, | ||
335 | struct cxl *adapter) | ||
336 | { | ||
337 | int i; | ||
338 | |||
339 | spin_lock(&adapter->guest->irq_alloc_lock); | ||
340 | for (i = 0; i < CXL_IRQ_RANGES; i++) | ||
341 | irq_free_range(adapter, irqs->offset[i], irqs->range[i]); | ||
342 | spin_unlock(&adapter->guest->irq_alloc_lock); | ||
343 | } | ||
344 | |||
345 | static int guest_register_serr_irq(struct cxl_afu *afu) | ||
346 | { | ||
347 | afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", | ||
348 | dev_name(&afu->dev)); | ||
349 | if (!afu->err_irq_name) | ||
350 | return -ENOMEM; | ||
351 | |||
352 | if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, | ||
353 | guest_slice_irq_err, afu, afu->err_irq_name))) { | ||
354 | kfree(afu->err_irq_name); | ||
355 | afu->err_irq_name = NULL; | ||
356 | return -ENOMEM; | ||
357 | } | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | static void guest_release_serr_irq(struct cxl_afu *afu) | ||
363 | { | ||
364 | cxl_unmap_irq(afu->serr_virq, afu); | ||
365 | cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); | ||
366 | kfree(afu->err_irq_name); | ||
367 | } | ||
368 | |||
369 | static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) | ||
370 | { | ||
371 | return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, | ||
372 | tfc >> 32, (psl_reset_mask != 0)); | ||
373 | } | ||
374 | |||
375 | static void disable_afu_irqs(struct cxl_context *ctx) | ||
376 | { | ||
377 | irq_hw_number_t hwirq; | ||
378 | unsigned int virq; | ||
379 | int r, i; | ||
380 | |||
381 | pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); | ||
382 | for (r = 0; r < CXL_IRQ_RANGES; r++) { | ||
383 | hwirq = ctx->irqs.offset[r]; | ||
384 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { | ||
385 | virq = irq_find_mapping(NULL, hwirq); | ||
386 | disable_irq(virq); | ||
387 | } | ||
388 | } | ||
389 | } | ||
390 | |||
391 | static void enable_afu_irqs(struct cxl_context *ctx) | ||
392 | { | ||
393 | irq_hw_number_t hwirq; | ||
394 | unsigned int virq; | ||
395 | int r, i; | ||
396 | |||
397 | pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); | ||
398 | for (r = 0; r < CXL_IRQ_RANGES; r++) { | ||
399 | hwirq = ctx->irqs.offset[r]; | ||
400 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { | ||
401 | virq = irq_find_mapping(NULL, hwirq); | ||
402 | enable_irq(virq); | ||
403 | } | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, | ||
408 | u64 offset, u64 *val) | ||
409 | { | ||
410 | unsigned long cr; | ||
412 | int rc = 0; | ||
413 | |||
414 | if (afu->crs_len < sz) | ||
415 | return -ENOENT; | ||
416 | |||
417 | if (unlikely(offset >= afu->crs_len)) | ||
418 | return -ERANGE; | ||
419 | |||
420 | cr = get_zeroed_page(GFP_KERNEL); | ||
421 | if (!cr) | ||
422 | return -ENOMEM; | ||
423 | |||
424 | rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, | ||
425 | virt_to_phys((void *)cr), sz); | ||
426 | if (rc) | ||
427 | goto err; | ||
428 | |||
429 | switch (sz) { | ||
430 | case 1: | ||
431 | *val = *((u8 *) cr); | ||
433 | break; | ||
434 | case 2: | ||
435 | *val = in_le16((u16 *)cr); | ||
436 | break; | ||
437 | case 4: | ||
438 | *val = in_le32((unsigned *)cr); | ||
439 | break; | ||
440 | case 8: | ||
441 | *val = in_le64((u64 *)cr); | ||
442 | break; | ||
443 | default: | ||
444 | WARN_ON(1); | ||
445 | } | ||
446 | err: | ||
447 | free_page(cr); | ||
448 | return rc; | ||
449 | } | ||
450 | |||
451 | static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, | ||
452 | u32 *out) | ||
453 | { | ||
454 | int rc; | ||
455 | u64 val; | ||
456 | |||
457 | rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); | ||
458 | if (!rc) | ||
459 | *out = (u32) val; | ||
460 | return rc; | ||
461 | } | ||
462 | |||
463 | static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, | ||
464 | u16 *out) | ||
465 | { | ||
466 | int rc; | ||
467 | u64 val; | ||
468 | |||
469 | rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); | ||
470 | if (!rc) | ||
471 | *out = (u16) val; | ||
472 | return rc; | ||
473 | } | ||
474 | |||
475 | static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, | ||
476 | u8 *out) | ||
477 | { | ||
478 | int rc; | ||
479 | u64 val; | ||
480 | |||
481 | rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); | ||
482 | if (!rc) | ||
483 | *out = (u8) val; | ||
484 | return rc; | ||
485 | } | ||
486 | |||
487 | static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, | ||
488 | u64 *out) | ||
489 | { | ||
490 | return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); | ||
491 | } | ||
492 | |||
493 | static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) | ||
494 | { | ||
495 | /* config record is not writable from guest */ | ||
496 | return -EPERM; | ||
497 | } | ||
498 | |||
499 | static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) | ||
500 | { | ||
501 | /* config record is not writable from guest */ | ||
502 | return -EPERM; | ||
503 | } | ||
504 | |||
505 | static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) | ||
506 | { | ||
507 | /* config record is not writable from guest */ | ||
508 | return -EPERM; | ||
509 | } | ||
510 | |||
511 | static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) | ||
512 | { | ||
513 | struct cxl_process_element_hcall *elem; | ||
514 | struct cxl *adapter = ctx->afu->adapter; | ||
515 | const struct cred *cred; | ||
516 | u32 pid, idx; | ||
517 | int rc, r, i; | ||
518 | u64 mmio_addr, mmio_size; | ||
519 | __be64 flags = 0; | ||
520 | |||
521 | /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */ | ||
522 | if (!(elem = (struct cxl_process_element_hcall *) | ||
523 | get_zeroed_page(GFP_KERNEL))) | ||
524 | return -ENOMEM; | ||
525 | |||
526 | elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION); | ||
527 | if (ctx->kernel) { | ||
528 | pid = 0; | ||
529 | flags |= CXL_PE_TRANSLATION_ENABLED; | ||
530 | flags |= CXL_PE_PRIVILEGED_PROCESS; | ||
531 | if (mfmsr() & MSR_SF) | ||
532 | flags |= CXL_PE_64_BIT; | ||
533 | } else { | ||
534 | pid = current->pid; | ||
535 | flags |= CXL_PE_PROBLEM_STATE; | ||
536 | flags |= CXL_PE_TRANSLATION_ENABLED; | ||
537 | if (!test_tsk_thread_flag(current, TIF_32BIT)) | ||
538 | flags |= CXL_PE_64_BIT; | ||
539 | cred = get_current_cred(); | ||
540 | if (uid_eq(cred->euid, GLOBAL_ROOT_UID)) | ||
541 | flags |= CXL_PE_PRIVILEGED_PROCESS; | ||
542 | put_cred(cred); | ||
543 | } | ||
544 | elem->flags = cpu_to_be64(flags); | ||
545 | elem->common.tid = cpu_to_be32(0); /* Unused */ | ||
546 | elem->common.pid = cpu_to_be32(pid); | ||
547 | elem->common.csrp = cpu_to_be64(0); /* disable */ | ||
548 | elem->common.aurp0 = cpu_to_be64(0); /* disable */ | ||
549 | elem->common.aurp1 = cpu_to_be64(0); /* disable */ | ||
550 | |||
551 | cxl_prefault(ctx, wed); | ||
552 | |||
553 | elem->common.sstp0 = cpu_to_be64(ctx->sstp0); | ||
554 | elem->common.sstp1 = cpu_to_be64(ctx->sstp1); | ||
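| /* | ||
| * Interrupt 0 of range 0 is the PSL interrupt; every other AFU | ||
| * interrupt is advertised in the MSB-first applicationVirtualIsn | ||
| * bitmap, indexed relative to the adapter's irq_base_offset. | ||
| */ | ||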
555 | for (r = 0; r < CXL_IRQ_RANGES; r++) { | ||
556 | for (i = 0; i < ctx->irqs.range[r]; i++) { | ||
557 | if (r == 0 && i == 0) { | ||
558 | elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]); | ||
559 | } else { | ||
560 | idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset; | ||
561 | elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8); | ||
562 | } | ||
563 | } | ||
564 | } | ||
565 | elem->common.amr = cpu_to_be64(amr); | ||
566 | elem->common.wed = cpu_to_be64(wed); | ||
567 | |||
568 | disable_afu_irqs(ctx); | ||
569 | |||
570 | rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, | ||
571 | &ctx->process_token, &mmio_addr, &mmio_size); | ||
572 | if (rc == H_SUCCESS) { | ||
573 | if (ctx->master || !ctx->afu->pp_psa) { | ||
574 | ctx->psn_phys = ctx->afu->psn_phys; | ||
575 | ctx->psn_size = ctx->afu->adapter->ps_size; | ||
576 | } else { | ||
577 | ctx->psn_phys = mmio_addr; | ||
578 | ctx->psn_size = mmio_size; | ||
579 | } | ||
580 | if (ctx->afu->pp_psa && mmio_size && | ||
581 | ctx->afu->pp_size == 0) { | ||
582 | /* | ||
583 | * There's no property in the device tree to read the | ||
584 | * pp_size; we only find out at the first attach. | ||
585 | * Compared to bare-metal, that is too late and we | ||
586 | * should really lock here. However, on PowerVM, | ||
587 | * pp_size is only used for display in /sys. | ||
588 | * Being discussed with pHyp for their next release. | ||
589 | */ | ||
590 | ctx->afu->pp_size = mmio_size; | ||
591 | } | ||
592 | /* from PAPR: process element is bytes 4-7 of process token */ | ||
593 | ctx->external_pe = ctx->process_token & 0xFFFFFFFF; | ||
594 | pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx\n", | ||
595 | ctx->pe, ctx->external_pe, ctx->psn_size); | ||
596 | ctx->pe_inserted = true; | ||
597 | enable_afu_irqs(ctx); | ||
598 | } | ||
599 | |||
600 | free_page((u64)elem); | ||
601 | return rc; | ||
602 | } | ||
603 | |||
604 | static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) | ||
605 | { | ||
606 | pr_devel("in %s\n", __func__); | ||
607 | |||
608 | ctx->kernel = kernel; | ||
609 | if (ctx->afu->current_mode == CXL_MODE_DIRECTED) | ||
610 | return attach_afu_directed(ctx, wed, amr); | ||
611 | |||
612 | /* dedicated mode not supported on FW840 */ | ||
613 | |||
614 | return -EINVAL; | ||
615 | } | ||
616 | |||
617 | static int detach_afu_directed(struct cxl_context *ctx) | ||
618 | { | ||
619 | if (!ctx->pe_inserted) | ||
620 | return 0; | ||
621 | if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) | ||
622 | return -1; | ||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | static int guest_detach_process(struct cxl_context *ctx) | ||
627 | { | ||
628 | pr_devel("in %s\n", __func__); | ||
629 | trace_cxl_detach(ctx); | ||
630 | |||
631 | if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) | ||
632 | return -EIO; | ||
633 | |||
634 | if (ctx->afu->current_mode == CXL_MODE_DIRECTED) | ||
635 | return detach_afu_directed(ctx); | ||
636 | |||
637 | return -EINVAL; | ||
638 | } | ||
639 | |||
640 | static void guest_release_afu(struct device *dev) | ||
641 | { | ||
642 | struct cxl_afu *afu = to_cxl_afu(dev); | ||
643 | |||
644 | pr_devel("%s\n", __func__); | ||
645 | |||
646 | idr_destroy(&afu->contexts_idr); | ||
647 | |||
648 | kfree(afu->guest); | ||
649 | kfree(afu); | ||
650 | } | ||
651 | |||
652 | ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) | ||
653 | { | ||
654 | return guest_collect_vpd(NULL, afu, buf, len); | ||
655 | } | ||
656 | |||
657 | #define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE | ||
658 | static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, | ||
659 | loff_t off, size_t count) | ||
660 | { | ||
661 | void *tbuf = NULL; | ||
662 | int rc = 0; | ||
663 | |||
664 | tbuf = (void *) get_zeroed_page(GFP_KERNEL); | ||
665 | if (!tbuf) | ||
666 | return -ENOMEM; | ||
667 | |||
668 | rc = cxl_h_get_afu_err(afu->guest->handle, | ||
669 | off & 0x7, | ||
670 | virt_to_phys(tbuf), | ||
671 | count); | ||
672 | if (rc) | ||
673 | goto err; | ||
674 | |||
675 | if (count > ERR_BUFF_MAX_COPY_SIZE) | ||
676 | count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); | ||
677 | memcpy(buf, tbuf, count); | ||
678 | err: | ||
679 | free_page((u64)tbuf); | ||
680 | |||
681 | return rc; | ||
682 | } | ||
683 | |||
684 | static int guest_afu_check_and_enable(struct cxl_afu *afu) | ||
685 | { | ||
686 | return 0; | ||
687 | } | ||
688 | |||
689 | static bool guest_support_attributes(const char *attr_name, | ||
690 | enum cxl_attrs type) | ||
691 | { | ||
692 | switch (type) { | ||
693 | case CXL_ADAPTER_ATTRS: | ||
694 | if ((strcmp(attr_name, "base_image") == 0) || | ||
695 | (strcmp(attr_name, "load_image_on_perst") == 0) || | ||
696 | (strcmp(attr_name, "perst_reloads_same_image") == 0) || | ||
697 | (strcmp(attr_name, "image_loaded") == 0)) | ||
698 | return false; | ||
699 | break; | ||
700 | case CXL_AFU_MASTER_ATTRS: | ||
701 | if ((strcmp(attr_name, "pp_mmio_off") == 0)) | ||
702 | return false; | ||
703 | break; | ||
704 | case CXL_AFU_ATTRS: | ||
705 | break; | ||
706 | default: | ||
707 | break; | ||
708 | } | ||
709 | |||
710 | return true; | ||
711 | } | ||
712 | |||
713 | static int activate_afu_directed(struct cxl_afu *afu) | ||
714 | { | ||
715 | int rc; | ||
716 | |||
717 | dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); | ||
718 | |||
719 | afu->current_mode = CXL_MODE_DIRECTED; | ||
720 | |||
721 | afu->num_procs = afu->max_procs_virtualised; | ||
722 | |||
723 | if ((rc = cxl_chardev_m_afu_add(afu))) | ||
724 | return rc; | ||
725 | |||
726 | if ((rc = cxl_sysfs_afu_m_add(afu))) | ||
727 | goto err; | ||
728 | |||
729 | if ((rc = cxl_chardev_s_afu_add(afu))) | ||
730 | goto err1; | ||
731 | |||
732 | return 0; | ||
733 | err1: | ||
734 | cxl_sysfs_afu_m_remove(afu); | ||
735 | err: | ||
736 | cxl_chardev_afu_remove(afu); | ||
737 | return rc; | ||
738 | } | ||
739 | |||
740 | static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) | ||
741 | { | ||
742 | if (!mode) | ||
743 | return 0; | ||
744 | if (!(mode & afu->modes_supported)) | ||
745 | return -EINVAL; | ||
746 | |||
747 | if (mode == CXL_MODE_DIRECTED) | ||
748 | return activate_afu_directed(afu); | ||
749 | |||
750 | if (mode == CXL_MODE_DEDICATED) | ||
751 | dev_err(&afu->dev, "Dedicated mode not supported\n"); | ||
752 | |||
753 | return -EINVAL; | ||
754 | } | ||
755 | |||
756 | static int deactivate_afu_directed(struct cxl_afu *afu) | ||
757 | { | ||
758 | dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); | ||
759 | |||
760 | afu->current_mode = 0; | ||
761 | afu->num_procs = 0; | ||
762 | |||
763 | cxl_sysfs_afu_m_remove(afu); | ||
764 | cxl_chardev_afu_remove(afu); | ||
765 | |||
766 | cxl_ops->afu_reset(afu); | ||
767 | |||
768 | return 0; | ||
769 | } | ||
770 | |||
771 | static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) | ||
772 | { | ||
773 | if (!mode) | ||
774 | return 0; | ||
775 | if (!(mode & afu->modes_supported)) | ||
776 | return -EINVAL; | ||
777 | |||
778 | if (mode == CXL_MODE_DIRECTED) | ||
779 | return deactivate_afu_directed(afu); | ||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static int guest_afu_reset(struct cxl_afu *afu) | ||
784 | { | ||
785 | pr_devel("AFU(%d) reset request\n", afu->slice); | ||
786 | return cxl_h_reset_afu(afu->guest->handle); | ||
787 | } | ||
788 | |||
789 | static int guest_map_slice_regs(struct cxl_afu *afu) | ||
790 | { | ||
791 | if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { | ||
792 | dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", | ||
793 | afu->slice); | ||
794 | return -ENOMEM; | ||
795 | } | ||
796 | return 0; | ||
797 | } | ||
798 | |||
799 | static void guest_unmap_slice_regs(struct cxl_afu *afu) | ||
800 | { | ||
801 | if (afu->p2n_mmio) | ||
802 | iounmap(afu->p2n_mmio); | ||
803 | } | ||
804 | |||
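| /* | ||
| * Compare the error state reported by the hypervisor with the one | ||
| * cached in afu->guest->previous_state and drive the PCI error | ||
| * handlers accordingly. Returns 1 when the AFU is, or has recovered | ||
| * to, the normal state, 0 otherwise, and a negative errno on error. | ||
| */ | ||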
805 | static int afu_update_state(struct cxl_afu *afu) | ||
806 | { | ||
807 | int rc, cur_state; | ||
808 | |||
809 | rc = afu_read_error_state(afu, &cur_state); | ||
810 | if (rc) | ||
811 | return rc; | ||
812 | |||
813 | if (afu->guest->previous_state == cur_state) | ||
814 | return 0; | ||
815 | |||
816 | pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); | ||
817 | |||
818 | switch (cur_state) { | ||
819 | case H_STATE_NORMAL: | ||
820 | afu->guest->previous_state = cur_state; | ||
821 | rc = 1; | ||
822 | break; | ||
823 | |||
824 | case H_STATE_DISABLE: | ||
825 | pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, | ||
826 | pci_channel_io_frozen); | ||
827 | |||
828 | cxl_context_detach_all(afu); | ||
829 | if ((rc = cxl_ops->afu_reset(afu))) | ||
830 | pr_devel("reset hcall failed %d\n", rc); | ||
831 | |||
832 | rc = afu_read_error_state(afu, &cur_state); | ||
833 | if (!rc && cur_state == H_STATE_NORMAL) { | ||
834 | pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, | ||
835 | pci_channel_io_normal); | ||
836 | pci_error_handlers(afu, CXL_RESUME_EVENT, 0); | ||
837 | rc = 1; | ||
838 | } | ||
839 | afu->guest->previous_state = 0; | ||
840 | break; | ||
841 | |||
842 | case H_STATE_TEMP_UNAVAILABLE: | ||
843 | afu->guest->previous_state = cur_state; | ||
844 | break; | ||
845 | |||
846 | case H_STATE_PERM_UNAVAILABLE: | ||
847 | dev_err(&afu->dev, "AFU is in permanent error state\n"); | ||
848 | pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, | ||
849 | pci_channel_io_perm_failure); | ||
850 | afu->guest->previous_state = cur_state; | ||
851 | break; | ||
852 | |||
853 | default: | ||
854 | pr_err("Unexpected AFU(%d) error state: %#x\n", | ||
855 | afu->slice, cur_state); | ||
856 | return -EINVAL; | ||
857 | } | ||
858 | |||
859 | return rc; | ||
860 | } | ||
861 | |||
862 | static int afu_do_recovery(struct cxl_afu *afu) | ||
863 | { | ||
864 | int rc; | ||
865 | |||
866 | /* Many threads can arrive here, for example on detach_all. | ||
867 | * Only one needs to drive the recovery. | ||
868 | */ | ||
869 | if (mutex_trylock(&afu->guest->recovery_lock)) { | ||
870 | rc = afu_update_state(afu); | ||
871 | mutex_unlock(&afu->guest->recovery_lock); | ||
872 | return rc; | ||
873 | } | ||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) | ||
878 | { | ||
879 | int state; | ||
880 | |||
881 | if (afu) { | ||
882 | if (afu_read_error_state(afu, &state) || | ||
883 | state != H_STATE_NORMAL) { | ||
884 | if (afu_do_recovery(afu) > 0) { | ||
885 | /* check again in case we've just fixed it */ | ||
886 | if (!afu_read_error_state(afu, &state) && | ||
887 | state == H_STATE_NORMAL) | ||
888 | return true; | ||
889 | } | ||
890 | return false; | ||
891 | } | ||
892 | } | ||
893 | |||
894 | return true; | ||
895 | } | ||
896 | |||
897 | static int afu_properties_look_ok(struct cxl_afu *afu) | ||
898 | { | ||
899 | if (afu->pp_irqs < 0) { | ||
900 | dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); | ||
901 | return -EINVAL; | ||
902 | } | ||
903 | |||
904 | if (afu->max_procs_virtualised < 1) { | ||
905 | dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); | ||
906 | return -EINVAL; | ||
907 | } | ||
908 | |||
909 | if (afu->crs_len < 0) { | ||
910 | dev_err(&afu->dev, "Unexpected configuration record size value\n"); | ||
911 | return -EINVAL; | ||
912 | } | ||
913 | |||
914 | return 0; | ||
915 | } | ||
916 | |||
917 | int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np) | ||
918 | { | ||
919 | struct cxl_afu *afu; | ||
920 | bool free = true; | ||
921 | int rc; | ||
922 | |||
923 | pr_devel("in %s - AFU(%d)\n", __func__, slice); | ||
924 | if (!(afu = cxl_alloc_afu(adapter, slice))) | ||
925 | return -ENOMEM; | ||
926 | |||
927 | if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { | ||
928 | kfree(afu); | ||
929 | return -ENOMEM; | ||
930 | } | ||
931 | |||
932 | mutex_init(&afu->guest->recovery_lock); | ||
933 | |||
934 | if ((rc = dev_set_name(&afu->dev, "afu%i.%i", | ||
935 | adapter->adapter_num, | ||
936 | slice))) | ||
937 | goto err1; | ||
938 | |||
939 | adapter->slices++; | ||
940 | |||
941 | if ((rc = cxl_of_read_afu_handle(afu, afu_np))) | ||
942 | goto err1; | ||
943 | |||
944 | if ((rc = cxl_ops->afu_reset(afu))) | ||
945 | goto err1; | ||
946 | |||
947 | if ((rc = cxl_of_read_afu_properties(afu, afu_np))) | ||
948 | goto err1; | ||
949 | |||
950 | if ((rc = afu_properties_look_ok(afu))) | ||
951 | goto err1; | ||
952 | |||
953 | if ((rc = guest_map_slice_regs(afu))) | ||
954 | goto err1; | ||
955 | |||
956 | if ((rc = guest_register_serr_irq(afu))) | ||
957 | goto err2; | ||
958 | |||
959 | /* | ||
960 | * After we call this function we must not free the afu directly, even | ||
961 | * if it returns an error! | ||
962 | */ | ||
963 | if ((rc = cxl_register_afu(afu))) | ||
964 | goto err_put1; | ||
965 | |||
966 | if ((rc = cxl_sysfs_afu_add(afu))) | ||
967 | goto err_put1; | ||
968 | |||
969 | /* | ||
970 | * pHyp doesn't expose the programming models supported by the | ||
971 | * AFU. pHyp currently only supports directed mode. If it adds | ||
972 | * dedicated mode later, this version of cxl has no way to | ||
973 | * detect it. So we'll initialize the driver, but the first | ||
974 | * attach will fail. | ||
975 | * Being discussed with pHyp to do better (likely a new property). | ||
976 | */ | ||
977 | if (afu->max_procs_virtualised == 1) | ||
978 | afu->modes_supported = CXL_MODE_DEDICATED; | ||
979 | else | ||
980 | afu->modes_supported = CXL_MODE_DIRECTED; | ||
981 | |||
982 | if ((rc = cxl_afu_select_best_mode(afu))) | ||
983 | goto err_put2; | ||
984 | |||
985 | adapter->afu[afu->slice] = afu; | ||
986 | |||
987 | afu->enabled = true; | ||
988 | |||
989 | if ((rc = cxl_pci_vphb_add(afu))) | ||
990 | dev_info(&afu->dev, "Can't register vPHB\n"); | ||
991 | |||
992 | return 0; | ||
993 | |||
994 | err_put2: | ||
995 | cxl_sysfs_afu_remove(afu); | ||
996 | err_put1: | ||
997 | device_unregister(&afu->dev); | ||
998 | free = false; | ||
999 | guest_release_serr_irq(afu); | ||
1000 | err2: | ||
1001 | guest_unmap_slice_regs(afu); | ||
1002 | err1: | ||
1003 | if (free) { | ||
1004 | kfree(afu->guest); | ||
1005 | kfree(afu); | ||
1006 | } | ||
1007 | return rc; | ||
1008 | } | ||
1009 | |||
1010 | void cxl_guest_remove_afu(struct cxl_afu *afu) | ||
1011 | { | ||
1012 | if (!afu) | ||
1013 | return; | ||
1014 | |||
1015 | pr_devel("in %s - AFU(%d)\n", __func__, afu->slice); | ||
1016 | |||
1017 | cxl_pci_vphb_remove(afu); | ||
1018 | cxl_sysfs_afu_remove(afu); | ||
1019 | |||
1020 | spin_lock(&afu->adapter->afu_list_lock); | ||
1021 | afu->adapter->afu[afu->slice] = NULL; | ||
1022 | spin_unlock(&afu->adapter->afu_list_lock); | ||
1023 | |||
1024 | cxl_context_detach_all(afu); | ||
1025 | cxl_ops->afu_deactivate_mode(afu, afu->current_mode); | ||
1026 | guest_release_serr_irq(afu); | ||
1027 | guest_unmap_slice_regs(afu); | ||
1028 | |||
1029 | device_unregister(&afu->dev); | ||
1030 | } | ||
1031 | |||
1032 | static void free_adapter(struct cxl *adapter) | ||
1033 | { | ||
1034 | struct irq_avail *cur; | ||
1035 | int i; | ||
1036 | |||
1037 | if (adapter->guest->irq_avail) { | ||
1038 | for (i = 0; i < adapter->guest->irq_nranges; i++) { | ||
1039 | cur = &adapter->guest->irq_avail[i]; | ||
1040 | kfree(cur->bitmap); | ||
1041 | } | ||
1042 | kfree(adapter->guest->irq_avail); | ||
1043 | } | ||
1044 | kfree(adapter->guest->status); | ||
1045 | cxl_remove_adapter_nr(adapter); | ||
1046 | kfree(adapter->guest); | ||
1047 | kfree(adapter); | ||
1048 | } | ||
1049 | |||
1050 | static int properties_look_ok(struct cxl *adapter) | ||
1051 | { | ||
1052 | /* The absence of this property means that the operational | ||
1053 | * status is unknown or okay | ||
1054 | */ | ||
1055 | if (strlen(adapter->guest->status) && | ||
1056 | strcmp(adapter->guest->status, "okay")) { | ||
1057 | pr_err("ABORTING: Bad operational status of the device\n"); | ||
1058 | return -EINVAL; | ||
1059 | } | ||
1060 | |||
1061 | return 0; | ||
1062 | } | ||
1063 | |||
1064 | ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) | ||
1065 | { | ||
1066 | return guest_collect_vpd(adapter, NULL, buf, len); | ||
1067 | } | ||
1068 | |||
1069 | void cxl_guest_remove_adapter(struct cxl *adapter) | ||
1070 | { | ||
1071 | pr_devel("in %s\n", __func__); | ||
1072 | |||
1073 | cxl_sysfs_adapter_remove(adapter); | ||
1074 | |||
1075 | cxl_guest_remove_chardev(adapter); | ||
1076 | device_unregister(&adapter->dev); | ||
1077 | } | ||
1078 | |||
1079 | static void release_adapter(struct device *dev) | ||
1080 | { | ||
1081 | free_adapter(to_cxl_adapter(dev)); | ||
1082 | } | ||
1083 | |||
1084 | struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev) | ||
1085 | { | ||
1086 | struct cxl *adapter; | ||
1087 | bool free = true; | ||
1088 | int rc; | ||
1089 | |||
1090 | if (!(adapter = cxl_alloc_adapter())) | ||
1091 | return ERR_PTR(-ENOMEM); | ||
1092 | |||
1093 | if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) { | ||
1094 | free_adapter(adapter); | ||
1095 | return ERR_PTR(-ENOMEM); | ||
1096 | } | ||
1097 | |||
1098 | adapter->slices = 0; | ||
1099 | adapter->guest->pdev = pdev; | ||
1100 | adapter->dev.parent = &pdev->dev; | ||
1101 | adapter->dev.release = release_adapter; | ||
1102 | dev_set_drvdata(&pdev->dev, adapter); | ||
1103 | |||
1104 | if ((rc = cxl_of_read_adapter_handle(adapter, np))) | ||
1105 | goto err1; | ||
1106 | |||
1107 | if ((rc = cxl_of_read_adapter_properties(adapter, np))) | ||
1108 | goto err1; | ||
1109 | |||
1110 | if ((rc = properties_look_ok(adapter))) | ||
1111 | goto err1; | ||
1112 | |||
1113 | if ((rc = cxl_guest_add_chardev(adapter))) | ||
1114 | goto err1; | ||
1115 | |||
1116 | /* | ||
1117 | * After we call this function we must not free the adapter directly, | ||
1118 | * even if it returns an error! | ||
1119 | */ | ||
1120 | if ((rc = cxl_register_adapter(adapter))) | ||
1121 | goto err_put1; | ||
1122 | |||
1123 | if ((rc = cxl_sysfs_adapter_add(adapter))) | ||
1124 | goto err_put1; | ||
1125 | |||
1126 | return adapter; | ||
1127 | |||
1128 | err_put1: | ||
1129 | device_unregister(&adapter->dev); | ||
1130 | free = false; | ||
1131 | cxl_guest_remove_chardev(adapter); | ||
1132 | err1: | ||
1133 | if (free) | ||
1134 | free_adapter(adapter); | ||
1135 | return ERR_PTR(rc); | ||
1136 | } | ||
1137 | |||
1138 | void cxl_guest_reload_module(struct cxl *adapter) | ||
1139 | { | ||
1140 | struct platform_device *pdev; | ||
1141 | |||
1142 | pdev = adapter->guest->pdev; | ||
1143 | cxl_guest_remove_adapter(adapter); | ||
1144 | |||
1145 | cxl_of_probe(pdev); | ||
1146 | } | ||
1147 | |||
1148 | const struct cxl_backend_ops cxl_guest_ops = { | ||
1149 | .module = THIS_MODULE, | ||
1150 | .adapter_reset = guest_reset, | ||
1151 | .alloc_one_irq = guest_alloc_one_irq, | ||
1152 | .release_one_irq = guest_release_one_irq, | ||
1153 | .alloc_irq_ranges = guest_alloc_irq_ranges, | ||
1154 | .release_irq_ranges = guest_release_irq_ranges, | ||
1155 | .setup_irq = NULL, | ||
1156 | .handle_psl_slice_error = guest_handle_psl_slice_error, | ||
1157 | .psl_interrupt = guest_psl_irq, | ||
1158 | .ack_irq = guest_ack_irq, | ||
1159 | .attach_process = guest_attach_process, | ||
1160 | .detach_process = guest_detach_process, | ||
1161 | .support_attributes = guest_support_attributes, | ||
1162 | .link_ok = guest_link_ok, | ||
1163 | .release_afu = guest_release_afu, | ||
1164 | .afu_read_err_buffer = guest_afu_read_err_buffer, | ||
1165 | .afu_check_and_enable = guest_afu_check_and_enable, | ||
1166 | .afu_activate_mode = guest_afu_activate_mode, | ||
1167 | .afu_deactivate_mode = guest_afu_deactivate_mode, | ||
1168 | .afu_reset = guest_afu_reset, | ||
1169 | .afu_cr_read8 = guest_afu_cr_read8, | ||
1170 | .afu_cr_read16 = guest_afu_cr_read16, | ||
1171 | .afu_cr_read32 = guest_afu_cr_read32, | ||
1172 | .afu_cr_read64 = guest_afu_cr_read64, | ||
1173 | .afu_cr_write8 = guest_afu_cr_write8, | ||
1174 | .afu_cr_write16 = guest_afu_cr_write16, | ||
1175 | .afu_cr_write32 = guest_afu_cr_write32, | ||
1176 | .read_adapter_vpd = cxl_guest_read_adapter_vpd, | ||
1177 | }; | ||
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c new file mode 100644 index 000000000000..d6d11f4056d7 --- /dev/null +++ b/drivers/misc/cxl/hcalls.c | |||
@@ -0,0 +1,647 @@ | |||
1 | /* | ||
2 | * Copyright 2015 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | |||
11 | #include <linux/compiler.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <asm/byteorder.h> | ||
15 | #include "hcalls.h" | ||
16 | #include "trace.h" | ||
17 | |||
18 | #define CXL_HCALL_TIMEOUT 60000 | ||
19 | #define CXL_HCALL_TIMEOUT_DOWNLOAD 120000 | ||
20 | |||
21 | #define H_ATTACH_CA_PROCESS 0x344 | ||
22 | #define H_CONTROL_CA_FUNCTION 0x348 | ||
23 | #define H_DETACH_CA_PROCESS 0x34C | ||
24 | #define H_COLLECT_CA_INT_INFO 0x350 | ||
25 | #define H_CONTROL_CA_FAULTS 0x354 | ||
26 | #define H_DOWNLOAD_CA_FUNCTION 0x35C | ||
27 | #define H_DOWNLOAD_CA_FACILITY 0x364 | ||
28 | #define H_CONTROL_CA_FACILITY 0x368 | ||
29 | |||
30 | #define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */ | ||
31 | #define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */ | ||
32 | #define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */ | ||
33 | #define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */ | ||
34 | #define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */ | ||
35 | #define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */ | ||
36 | #define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */ | ||
37 | #define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */ | ||
38 | #define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */ | ||
39 | #define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */ | ||
40 | #define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */ | ||
41 | #define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */ | ||
42 | |||
43 | #define H_CONTROL_CA_FAULTS_RESPOND_PSL 1 | ||
44 | #define H_CONTROL_CA_FAULTS_RESPOND_AFU 2 | ||
45 | |||
46 | #define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */ | ||
47 | #define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */ | ||
48 | |||
49 | #define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */ | ||
50 | #define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */ | ||
51 | |||
52 | |||
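| /* | ||
| * Retry loop for hcalls that may return H_BUSY or an H_IS_LONG_BUSY | ||
| * hint: sleep for the hinted number of milliseconds (10ms for plain | ||
| * H_BUSY), feed the continuation token from retbuf[0] back in, and | ||
| * give up with H_BUSY once CXL_HCALL_TIMEOUT msec of total delay | ||
| * has accumulated. | ||
| */ | ||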
53 | #define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \ | ||
54 | { \ | ||
55 | unsigned int delay, total_delay = 0; \ | ||
56 | u64 token = 0; \ | ||
57 | \ | ||
58 | memset(retbuf, 0, sizeof(retbuf)); \ | ||
59 | while (1) { \ | ||
60 | rc = call(fn, retbuf, __VA_ARGS__, token); \ | ||
61 | token = retbuf[0]; \ | ||
62 | if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \ | ||
63 | break; \ | ||
64 | \ | ||
65 | if (rc == H_BUSY) \ | ||
66 | delay = 10; \ | ||
67 | else \ | ||
68 | delay = get_longbusy_msecs(rc); \ | ||
69 | \ | ||
70 | total_delay += delay; \ | ||
71 | if (total_delay > CXL_HCALL_TIMEOUT) { \ | ||
72 | WARN(1, "Warning: Giving up waiting for CXL hcall " \ | ||
73 | "%#x after %u msec\n", fn, total_delay); \ | ||
74 | rc = H_BUSY; \ | ||
75 | break; \ | ||
76 | } \ | ||
77 | msleep(delay); \ | ||
78 | } \ | ||
79 | } | ||
80 | #define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__) | ||
81 | #define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__) | ||
82 | |||
83 | #define _PRINT_MSG(rc, format, ...) \ | ||
84 | { \ | ||
85 | if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \ | ||
86 | pr_err(format, __VA_ARGS__); \ | ||
87 | else \ | ||
88 | pr_devel(format, __VA_ARGS__); \ | ||
89 | } | ||
90 | |||
91 | |||
92 | static const char *afu_op_names[] = { | ||
93 | "UNKNOWN_OP", /* 0 undefined */ | ||
94 | "RESET", /* 1 */ | ||
95 | "SUSPEND_PROCESS", /* 2 */ | ||
96 | "RESUME_PROCESS", /* 3 */ | ||
97 | "READ_ERR_STATE", /* 4 */ | ||
98 | "GET_AFU_ERR", /* 5 */ | ||
99 | "GET_CONFIG", /* 6 */ | ||
100 | "GET_DOWNLOAD_STATE", /* 7 */ | ||
101 | "TERMINATE_PROCESS", /* 8 */ | ||
102 | "COLLECT_VPD", /* 9 */ | ||
103 | "UNKNOWN_OP", /* 10 undefined */ | ||
104 | "GET_FUNCTION_ERR_INT", /* 11 */ | ||
105 | "ACK_FUNCTION_ERR_INT", /* 12 */ | ||
106 | "GET_ERROR_LOG", /* 13 */ | ||
107 | }; | ||
108 | |||
109 | static char *control_adapter_op_names[] = { | ||
110 | "UNKNOWN_OP", /* 0 undefined */ | ||
111 | "RESET", /* 1 */ | ||
112 | "COLLECT_VPD", /* 2 */ | ||
113 | }; | ||
114 | |||
115 | static char *download_op_names[] = { | ||
116 | "UNKNOWN_OP", /* 0 undefined */ | ||
117 | "DOWNLOAD", /* 1 */ | ||
118 | "VALIDATE", /* 2 */ | ||
119 | }; | ||
120 | |||
121 | static char *op_str(unsigned int op, char *name_array[], int array_len) | ||
122 | { | ||
123 | if (op >= array_len) | ||
124 | return "UNKNOWN_OP"; | ||
125 | return name_array[op]; | ||
126 | } | ||
127 | |||
128 | #define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array)) | ||
129 | |||
130 | #define OP_STR_AFU(op) OP_STR(op, afu_op_names) | ||
131 | #define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names) | ||
132 | #define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names) | ||
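/*
 * For example, OP_STR_AFU(5) resolves to "GET_AFU_ERR"; out-of-range
 * opcodes, and the undefined slots 0 and 10, fall back to "UNKNOWN_OP",
 * so the trace and log formatting below is always safe.
 */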
133 | |||
134 | |||
135 | long cxl_h_attach_process(u64 unit_address, | ||
136 | struct cxl_process_element_hcall *element, | ||
137 | u64 *process_token, u64 *mmio_addr, u64 *mmio_size) | ||
138 | { | ||
139 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
140 | long rc; | ||
141 | |||
142 | CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element)); | ||
143 | _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n", | ||
144 | unit_address, virt_to_phys(element), rc); | ||
145 | trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc); | ||
146 | |||
147 | pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n", | ||
148 | retbuf[0], retbuf[1], retbuf[2]); | ||
149 | cxl_dump_debug_buffer(element, sizeof(*element)); | ||
150 | |||
151 | switch (rc) { | ||
152 | case H_SUCCESS: /* The process info is attached to the coherent platform function */ | ||
153 | *process_token = retbuf[0]; | ||
154 | if (mmio_addr) | ||
155 | *mmio_addr = retbuf[1]; | ||
156 | if (mmio_size) | ||
157 | *mmio_size = retbuf[2]; | ||
158 | return 0; | ||
159 | case H_PARAMETER: /* An incorrect parameter was supplied. */ | ||
160 | case H_FUNCTION: /* The function is not supported. */ | ||
161 | return -EINVAL; | ||
162 | case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ | ||
163 | case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */ | ||
164 | case H_HARDWARE: /* A hardware event prevented the attach operation */ | ||
165 | case H_STATE: /* The coherent platform function is not in a valid state */ | ||
166 | case H_BUSY: | ||
167 | return -EBUSY; | ||
168 | default: | ||
169 | WARN(1, "Unexpected return code: %lx", rc); | ||
170 | return -EINVAL; | ||
171 | } | ||
172 | } | ||
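/*
 * A sketch of the expected call sequence from a caller's side
 * (assumption, not from the commit).  The process element must live in
 * pinned, physically contiguous memory, since its real address is
 * handed to the hypervisor; the returned token identifies the attached
 * process in later hcalls:
 *
 *	u64 token, mmio_addr, mmio_size;
 *	long rc;
 *
 *	rc = cxl_h_attach_process(unit_address, element,
 *				  &token, &mmio_addr, &mmio_size);
 *	if (rc)
 *		return rc;
 *	... drive the AFU through the returned MMIO window ...
 *	cxl_h_detach_process(unit_address, token);
 */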
173 | |||
174 | /** | ||
175 | * cxl_h_detach_process - Detach a process element from a coherent | ||
176 | * platform function. | ||
177 | */ | ||
178 | long cxl_h_detach_process(u64 unit_address, u64 process_token) | ||
179 | { | ||
180 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
181 | long rc; | ||
182 | |||
183 | CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token); | ||
184 | _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc); | ||
185 | trace_cxl_hcall_detach(unit_address, process_token, rc); | ||
186 | |||
187 | switch (rc) { | ||
188 | case H_SUCCESS: /* The process was detached from the coherent platform function */ | ||
189 | return 0; | ||
190 | case H_PARAMETER: /* An incorrect parameter was supplied. */ | ||
191 | return -EINVAL; | ||
192 | case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ | ||
193 | case H_RESOURCE: /* The function has page table mappings for MMIO */ | ||
194 | case H_HARDWARE: /* A hardware event prevented the detach operation */ | ||
195 | case H_STATE: /* The coherent platform function is not in a valid state */ | ||
196 | case H_BUSY: | ||
197 | return -EBUSY; | ||
198 | default: | ||
199 | WARN(1, "Unexpected return code: %lx", rc); | ||
200 | return -EINVAL; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | /** | ||
205 | * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows | ||
206 | * the partition to manipulate or query | ||
207 | * certain coherent platform function behaviors. | ||
208 | */ | ||
209 | static long cxl_h_control_function(u64 unit_address, u64 op, | ||
210 | u64 p1, u64 p2, u64 p3, u64 p4, u64 *out) | ||
211 | { | ||
212 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
213 | long rc; | ||
214 | |||
215 | CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4); | ||
216 | _PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n", | ||
217 | unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc); | ||
218 | trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc); | ||
219 | |||
220 | switch (rc) { | ||
221 | case H_SUCCESS: /* The operation is completed for the coherent platform function */ | ||
222 | if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT || | ||
223 | op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE || | ||
224 | op == H_CONTROL_CA_FUNCTION_COLLECT_VPD)) | ||
225 | *out = retbuf[0]; | ||
226 | return 0; | ||
227 | case H_PARAMETER: /* An incorrect parameter was supplied. */ | ||
228 | case H_FUNCTION: /* The function is not supported. */ | ||
229 | case H_NOT_FOUND: /* The operation supplied was not valid */ | ||
230 | case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */ | ||
231 | case H_SG_LIST: /* A block list entry was invalid */ | ||
232 | return -EINVAL; | ||
233 | case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ | ||
234 | case H_RESOURCE: /* The function has page table mappings for MMIO */ | ||
235 | case H_HARDWARE: /* A hardware event prevented the attach operation */ | ||
236 | case H_STATE: /* The coherent platform function is not in a valid state */ | ||
237 | case H_BUSY: | ||
238 | return -EBUSY; | ||
239 | default: | ||
240 | WARN(1, "Unexpected return code: %lx", rc); | ||
241 | return -EINVAL; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | /** | ||
246 | * cxl_h_reset_afu - Perform a reset of the coherent platform function. | ||
247 | */ | ||
248 | long cxl_h_reset_afu(u64 unit_address) | ||
249 | { | ||
250 | return cxl_h_control_function(unit_address, | ||
251 | H_CONTROL_CA_FUNCTION_RESET, | ||
252 | 0, 0, 0, 0, | ||
253 | NULL); | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * cxl_h_suspend_process - Suspend execution of a process | ||
258 | * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when | ||
259 | * process was attached. | ||
260 | */ | ||
261 | long cxl_h_suspend_process(u64 unit_address, u64 process_token) | ||
262 | { | ||
263 | return cxl_h_control_function(unit_address, | ||
264 | H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS, | ||
265 | process_token, 0, 0, 0, | ||
266 | NULL); | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * cxl_h_resume_process - Resume execution of a process | ||
271 | * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when | ||
272 | * process was attached. | ||
273 | */ | ||
274 | long cxl_h_resume_process(u64 unit_address, u64 process_token) | ||
275 | { | ||
276 | return cxl_h_control_function(unit_address, | ||
277 | H_CONTROL_CA_FUNCTION_RESUME_PROCESS, | ||
278 | process_token, 0, 0, 0, | ||
279 | NULL); | ||
280 | } | ||
281 | |||
282 | /** | ||
283 | * cxl_h_read_error_state - Checks the error state of the coherent | ||
284 | * platform function. | ||
285 | * R4 contains the error state | ||
286 | */ | ||
287 | long cxl_h_read_error_state(u64 unit_address, u64 *state) | ||
288 | { | ||
289 | return cxl_h_control_function(unit_address, | ||
290 | H_CONTROL_CA_FUNCTION_READ_ERR_STATE, | ||
291 | 0, 0, 0, 0, | ||
292 | state); | ||
293 | } | ||
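/*
 * A short sketch (hypothetical caller) of checking the state returned
 * in R4 against the H_STATE_* values declared in hcalls.h:
 *
 *	u64 state;
 *
 *	if (cxl_h_read_error_state(unit_address, &state) == 0 &&
 *	    state != H_STATE_NORMAL)
 *		pr_warn("function in error state %llu\n", state);
 */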
294 | |||
295 | /** | ||
296 | * cxl_h_get_afu_err - collect the AFU error buffer | ||
297 | * Parameter1 = byte offset into error buffer to retrieve, valid values | ||
298 | * are between 0 and (ibm,error-buffer-size - 1) | ||
299 | * Parameter2 = 4K aligned real address of error buffer, to be filled in | ||
300 | * Parameter3 = length of error buffer, valid values are 4K or less | ||
301 | */ | ||
302 | long cxl_h_get_afu_err(u64 unit_address, u64 offset, | ||
303 | u64 buf_address, u64 len) | ||
304 | { | ||
305 | return cxl_h_control_function(unit_address, | ||
306 | H_CONTROL_CA_FUNCTION_GET_AFU_ERR, | ||
307 | offset, buf_address, len, 0, | ||
308 | NULL); | ||
309 | } | ||
310 | |||
311 | /** | ||
312 | * cxl_h_get_config - collect configuration record for the | ||
313 | * coherent platform function | ||
314 | * Parameter1 = # of configuration record to retrieve, valid values are | ||
315 | * between 0 and (ibm,#config-records - 1) | ||
316 | * Parameter2 = byte offset into configuration record to retrieve, | ||
317 | * valid values are between 0 and (ibm,config-record-size - 1) | ||
318 | * Parameter3 = 4K aligned real address of configuration record buffer, | ||
319 | * to be filled in | ||
320 | * Parameter4 = length of configuration buffer, valid values are 4K or less | ||
321 | */ | ||
322 | long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, | ||
323 | u64 buf_address, u64 len) | ||
324 | { | ||
325 | return cxl_h_control_function(unit_address, | ||
326 | H_CONTROL_CA_FUNCTION_GET_CONFIG, | ||
327 | cr_num, offset, buf_address, len, | ||
328 | NULL); | ||
329 | } | ||
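/*
 * Since at most 4K can be fetched per call, a configuration record
 * larger than one page would be read in 4K slices by advancing the
 * byte offset.  A sketch, assuming buf and record_size are managed by
 * the caller:
 *
 *	u64 off;
 *
 *	for (off = 0; off < record_size; off += SG_BUFFER_SIZE) {
 *		if (cxl_h_get_config(unit_address, cr_num, off,
 *				     virt_to_phys(buf), SG_BUFFER_SIZE))
 *			break;
 *		... consume up to 4K of the record ...
 *	}
 */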
330 | |||
331 | /** | ||
332 | * cxl_h_terminate_process - Terminate the process before completion | ||
333 | * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when | ||
334 | * process was attached. | ||
335 | */ | ||
336 | long cxl_h_terminate_process(u64 unit_address, u64 process_token) | ||
337 | { | ||
338 | return cxl_h_control_function(unit_address, | ||
339 | H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS, | ||
340 | process_token, 0, 0, 0, | ||
341 | NULL); | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * cxl_h_collect_vpd - Collect VPD for the coherent platform function. | ||
346 | * Parameter1 = # of VPD record to retrieve, valid values are between 0 | ||
347 | * and (ibm,#config-records - 1). | ||
348 | * Parameter2 = 4K naturally aligned real buffer containing block | ||
349 | * list entries | ||
350 | * Parameter3 = number of block list entries in the block list, valid | ||
351 | * values are between 0 and 256 | ||
352 | */ | ||
353 | long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address, | ||
354 | u64 num, u64 *out) | ||
355 | { | ||
356 | return cxl_h_control_function(unit_address, | ||
357 | H_CONTROL_CA_FUNCTION_COLLECT_VPD, | ||
358 | record, list_address, num, 0, | ||
359 | out); | ||
360 | } | ||
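/*
 * A simplified sketch of building the block list (byte-order handling
 * omitted; on success *out is assumed here to carry R4, i.e. the
 * amount of VPD returned).  The destination is described by struct
 * sg_list entries (see hcalls.h), themselves held in a 4K-aligned page
 * whose real address is passed to the hypervisor:
 *
 *	struct sg_list *list = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
 *	u64 vpd_len;
 *
 *	list[0].phys_addr = virt_to_phys(vpd_buf);
 *	list[0].len = SG_BUFFER_SIZE;
 *	if (cxl_h_collect_vpd(unit_address, 0, virt_to_phys(list),
 *			      1, &vpd_len) == 0)
 *		... vpd_buf now holds the returned VPD ...
 */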
361 | |||
362 | /** | ||
363 | * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt | ||
364 | */ | ||
365 | long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg) | ||
366 | { | ||
367 | return cxl_h_control_function(unit_address, | ||
368 | H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT, | ||
369 | 0, 0, 0, 0, reg); | ||
370 | } | ||
371 | |||
372 | /** | ||
373 | * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data | ||
374 | * based on an interrupt | ||
375 | * Parameter1 = value to write to the function-wide error interrupt register | ||
376 | */ | ||
377 | long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value) | ||
378 | { | ||
379 | return cxl_h_control_function(unit_address, | ||
380 | H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT, | ||
381 | value, 0, 0, 0, | ||
382 | NULL); | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of | ||
387 | * an error log | ||
388 | */ | ||
389 | long cxl_h_get_error_log(u64 unit_address, u64 value) | ||
390 | { | ||
391 | return cxl_h_control_function(unit_address, | ||
392 | H_CONTROL_CA_FUNCTION_GET_ERROR_LOG, | ||
393 | 0, 0, 0, 0, | ||
394 | NULL); | ||
395 | } | ||
396 | |||
397 | /** | ||
398 | * cxl_h_collect_int_info - Collect interrupt info about a coherent | ||
399 | * platform function after an interrupt occurred. | ||
400 | */ | ||
401 | long cxl_h_collect_int_info(u64 unit_address, u64 process_token, | ||
402 | struct cxl_irq_info *info) | ||
403 | { | ||
404 | long rc; | ||
405 | |||
406 | BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE])); | ||
407 | |||
408 | rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info, | ||
409 | unit_address, process_token); | ||
410 | _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n", | ||
411 | unit_address, process_token, rc); | ||
412 | trace_cxl_hcall_collect_int_info(unit_address, process_token, rc); | ||
413 | |||
414 | switch (rc) { | ||
415 | case H_SUCCESS: /* The interrupt info is returned in return registers. */ | ||
416 | pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid:%u, tid:%u, afu_err:%#llx, errstat:%#llx\n", | ||
417 | info->dsisr, info->dar, info->dsr, info->pid, | ||
418 | info->tid, info->afu_err, info->errstat); | ||
419 | return 0; | ||
420 | case H_PARAMETER: /* An incorrect parameter was supplied. */ | ||
421 | return -EINVAL; | ||
422 | case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */ | ||
423 | case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/ | ||
424 | case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */ | ||
425 | return -EBUSY; | ||
426 | default: | ||
427 | WARN(1, "Unexpected return code: %lx", rc); | ||
428 | return -EINVAL; | ||
429 | } | ||
430 | } | ||
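/*
 * The BUG_ON above is what lets the struct double as the hcall return
 * buffer: cxl_irq_info must span exactly the nine return registers.
 * A rough layout consistent with the fields printed above (the padding
 * at the end is an assumption):
 *
 *	struct cxl_irq_info {
 *		u64 dsisr;
 *		u64 dar;
 *		u64 dsr;
 *		u32 pid;
 *		u32 tid;
 *		u64 afu_err;
 *		u64 errstat;
 *		u64 reserved[3];	hypothetical padding to 9 registers
 *	};
 */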
431 | |||
432 | /** | ||
433 | * cxl_h_control_faults - Control the operation of a coherent platform | ||
434 | * function after a fault occurs. | ||
435 | * | ||
436 | * Parameters | ||
437 | * control-mask: value to control the faults | ||
438 | * looks like PSL_TFC_An shifted >> 32 | ||
439 | * reset-mask: mask to control reset of function faults | ||
440 | * Set reset_mask = 1 to reset PSL errors | ||
441 | */ | ||
442 | long cxl_h_control_faults(u64 unit_address, u64 process_token, | ||
443 | u64 control_mask, u64 reset_mask) | ||
444 | { | ||
445 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
446 | long rc; | ||
447 | |||
448 | memset(retbuf, 0, sizeof(retbuf)); | ||
449 | |||
450 | rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address, | ||
451 | H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token, | ||
452 | control_mask, reset_mask); | ||
453 | _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n", | ||
454 | unit_address, process_token, control_mask, reset_mask, | ||
455 | rc, retbuf[0]); | ||
456 | trace_cxl_hcall_control_faults(unit_address, process_token, | ||
457 | control_mask, reset_mask, retbuf[0], rc); | ||
458 | |||
459 | switch (rc) { | ||
460 | case H_SUCCESS: /* Faults were successfully controlled for the function. */ | ||
461 | return 0; | ||
462 | case H_PARAMETER: /* An incorrect parameter was supplied. */ | ||
463 | return -EINVAL; | ||
464 | case H_HARDWARE: /* A hardware event prevented the control of faults. */ | ||
465 | case H_STATE: /* The function was in an invalid state. */ | ||
466 | case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */ | ||
467 | return -EBUSY; | ||
468 | case H_FUNCTION: /* The function is not supported */ | ||
469 | case H_NOT_FOUND: /* The operation supplied was not valid */ | ||
470 | return -EINVAL; | ||
471 | default: | ||
472 | WARN(1, "Unexpected return code: %lx", rc); | ||
473 | return -EINVAL; | ||
474 | } | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call | ||
479 | * allows the partition to manipulate or query | ||
480 | * certain coherent platform facility behaviors. | ||
481 | */ | ||
482 | static long cxl_h_control_facility(u64 unit_address, u64 op, | ||
483 | u64 p1, u64 p2, u64 p3, u64 p4, u64 *out) | ||
484 | { | ||
485 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
486 | long rc; | ||
487 | |||
488 | CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4); | ||
489 | _PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n", | ||
490 | unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc); | ||
491 | trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc); | ||
492 | |||
493 | switch (rc) { | ||
494 | case H_SUCCESS: /* The operation is completed for the coherent platform facility */ | ||
495 | if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD) | ||
496 | *out = retbuf[0]; | ||
497 | return 0; | ||
498 | case H_PARAMETER: /* An incorrect parameter was supplied. */ | ||
499 | case H_FUNCTION: /* The function is not supported. */ | ||
500 | case H_NOT_FOUND: /* The operation supplied was not valid */ | ||
501 | case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */ | ||
502 | case H_SG_LIST: /* A block list entry was invalid */ | ||
503 | return -EINVAL; | ||
504 | case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ | ||
505 | case H_RESOURCE: /* The function has page table mappings for MMIO */ | ||
506 | case H_HARDWARE: /* A hardware event prevented the attach operation */ | ||
507 | case H_STATE: /* The coherent platform facility is not in a valid state */ | ||
508 | case H_BUSY: | ||
509 | return -EBUSY; | ||
510 | default: | ||
511 | WARN(1, "Unexpected return code: %lx", rc); | ||
512 | return -EINVAL; | ||
513 | } | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * cxl_h_reset_adapter - Perform a reset of the coherent platform facility. | ||
518 | */ | ||
519 | long cxl_h_reset_adapter(u64 unit_address) | ||
520 | { | ||
521 | return cxl_h_control_facility(unit_address, | ||
522 | H_CONTROL_CA_FACILITY_RESET, | ||
523 | 0, 0, 0, 0, | ||
524 | NULL); | ||
525 | } | ||
526 | |||
527 | /** | ||
528 | * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility. | ||
529 | * Parameter1 = 4K naturally aligned real buffer containing block | ||
530 | * list entries | ||
531 | * Parameter2 = number of block list entries in the block list, valid | ||
532 | * values are between 0 and 256 | ||
533 | */ | ||
534 | long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address, | ||
535 | u64 num, u64 *out) | ||
536 | { | ||
537 | return cxl_h_control_facility(unit_address, | ||
538 | H_CONTROL_CA_FACILITY_COLLECT_VPD, | ||
539 | list_address, num, 0, 0, | ||
540 | out); | ||
541 | } | ||
542 | |||
543 | /** | ||
544 | * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY | ||
545 | * hypervisor call provides platform support for | ||
546 | * downloading a base adapter image to the coherent | ||
547 | * platform facility, and for validating the entire | ||
548 | * image after the download. | ||
549 | * Parameters | ||
550 | * op: operation to perform to the coherent platform function | ||
551 | * Download: operation = 1, the base image in the coherent platform | ||
552 | * facility is first erased, and then | ||
553 | * programmed using the image supplied | ||
554 | * in the scatter/gather list. | ||
555 | * Validate: operation = 2, the base image in the coherent platform | ||
556 | * facility is compared with the image | ||
557 | * supplied in the scatter/gather list. | ||
558 | * list_address: 4K naturally aligned real buffer containing | ||
559 | * scatter/gather list entries. | ||
560 | * num: number of block list entries in the scatter/gather list. | ||
561 | */ | ||
562 | static long cxl_h_download_facility(u64 unit_address, u64 op, | ||
563 | u64 list_address, u64 num, | ||
564 | u64 *out) | ||
565 | { | ||
566 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
567 | unsigned int delay, total_delay = 0; | ||
568 | u64 token = 0; | ||
569 | long rc; | ||
570 | |||
571 | if (*out != 0) | ||
572 | token = *out; | ||
573 | |||
574 | memset(retbuf, 0, sizeof(retbuf)); | ||
575 | while (1) { | ||
576 | rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf, | ||
577 | unit_address, op, list_address, num, | ||
578 | token); | ||
579 | token = retbuf[0]; | ||
580 | if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) | ||
581 | break; | ||
582 | |||
583 | if (rc != H_BUSY) { | ||
584 | delay = get_longbusy_msecs(rc); | ||
585 | total_delay += delay; | ||
586 | if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) { | ||
587 | WARN(1, "Warning: Giving up waiting for CXL hcall " | ||
588 | "%#x after %u msec\n", | ||
589 | H_DOWNLOAD_CA_FACILITY, total_delay); | ||
590 | rc = H_BUSY; | ||
591 | break; | ||
592 | } | ||
593 | msleep(delay); | ||
594 | } | ||
595 | } | ||
596 | _PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n", | ||
597 | unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc); | ||
598 | trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc); | ||
599 | |||
600 | switch (rc) { | ||
601 | case H_SUCCESS: /* The operation is completed for the coherent platform facility */ | ||
602 | return 0; | ||
603 | case H_PARAMETER: /* An incorrect parameter was supplied */ | ||
604 | case H_FUNCTION: /* The function is not supported. */ | ||
605 | case H_SG_LIST: /* A block list entry was invalid */ | ||
606 | case H_BAD_DATA: /* Image verification failed */ | ||
607 | return -EINVAL; | ||
608 | case H_AUTHORITY: /* The partition does not have authority to perform this hcall */ | ||
609 | case H_RESOURCE: /* The function has page table mappings for MMIO */ | ||
610 | case H_HARDWARE: /* A hardware event prevented the attach operation */ | ||
611 | case H_STATE: /* The coherent platform facility is not in a valid state */ | ||
612 | case H_BUSY: | ||
613 | return -EBUSY; | ||
614 | case H_CONTINUE: | ||
615 | *out = retbuf[0]; | ||
616 | return 1; /* More data is needed for the complete image */ | ||
617 | default: | ||
618 | WARN(1, "Unexpected return code: %lx", rc); | ||
619 | return -EINVAL; | ||
620 | } | ||
621 | } | ||
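/*
 * A hypothetical caller streaming an image in chunks.  A return value
 * of 1 (H_CONTINUE) means the hypervisor expects more scatter/gather
 * data, with *out carrying the continuation token to pass back in.
 * Note that, unlike the macro-based wrappers above, a plain H_BUSY in
 * this loop is retried immediately without sleeping:
 *
 *	u64 token = 0;
 *	long rc;
 *
 *	do {
 *		... fill the scatter/gather list with the next chunk ...
 *		rc = cxl_h_download_adapter_image(unit_address,
 *					virt_to_phys(list), num, &token);
 *	} while (rc == 1);
 *	if (rc)
 *		... download failed ...
 */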
622 | |||
623 | /** | ||
624 | * cxl_h_download_adapter_image - Download the base image to the coherent | ||
625 | * platform facility. | ||
626 | */ | ||
627 | long cxl_h_download_adapter_image(u64 unit_address, | ||
628 | u64 list_address, u64 num, | ||
629 | u64 *out) | ||
630 | { | ||
631 | return cxl_h_download_facility(unit_address, | ||
632 | H_DOWNLOAD_CA_FACILITY_DOWNLOAD, | ||
633 | list_address, num, out); | ||
634 | } | ||
635 | |||
636 | /** | ||
637 | * cxl_h_validate_adapter_image - Validate the base image in the coherent | ||
638 | * platform facility. | ||
639 | */ | ||
640 | long cxl_h_validate_adapter_image(u64 unit_address, | ||
641 | u64 list_address, u64 num, | ||
642 | u64 *out) | ||
643 | { | ||
644 | return cxl_h_download_facility(unit_address, | ||
645 | H_DOWNLOAD_CA_FACILITY_VALIDATE, | ||
646 | list_address, num, out); | ||
647 | } | ||
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
new file mode 100644
index 000000000000..3e25522a5df6
--- /dev/null
+++ b/drivers/misc/cxl/hcalls.h
@@ -0,0 +1,204 @@ | |||
1 | /* | ||
2 | * Copyright 2015 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _HCALLS_H | ||
11 | #define _HCALLS_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <asm/byteorder.h> | ||
15 | #include <asm/hvcall.h> | ||
16 | #include "cxl.h" | ||
17 | |||
18 | #define SG_BUFFER_SIZE 4096 | ||
19 | #define SG_MAX_ENTRIES 256 | ||
20 | |||
21 | struct sg_list { | ||
22 | u64 phys_addr; | ||
23 | u64 len; | ||
24 | }; | ||
25 | |||
26 | /* | ||
27 | * This is straight out of PAPR, but with some of the compound fields replaced | ||
28 | * by a single field where they were identical to the register layout. | ||
29 | * | ||
30 | * The 'flags' field groups the various bit-fields | ||
31 | */ | ||
32 | #define CXL_PE_CSRP_VALID (1ULL << 63) | ||
33 | #define CXL_PE_PROBLEM_STATE (1ULL << 62) | ||
34 | #define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61) | ||
35 | #define CXL_PE_TAGS_ACTIVE (1ULL << 60) | ||
36 | #define CXL_PE_USER_STATE (1ULL << 59) | ||
37 | #define CXL_PE_TRANSLATION_ENABLED (1ULL << 58) | ||
38 | #define CXL_PE_64_BIT (1ULL << 57) | ||
39 | #define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56) | ||
40 | |||
41 | #define CXL_PROCESS_ELEMENT_VERSION 1 | ||
42 | struct cxl_process_element_hcall { | ||
43 | __be64 version; | ||
44 | __be64 flags; | ||
45 | u8 reserved0[12]; | ||
46 | __be32 pslVirtualIsn; | ||
47 | u8 applicationVirtualIsnBitmap[256]; | ||
48 | u8 reserved1[144]; | ||
49 | struct cxl_process_element_common common; | ||
50 | u8 reserved4[12]; | ||
51 | } __packed; | ||
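/*
 * A sketch (assumption, not from the commit): the flags word is
 * assembled by OR-ing the CXL_PE_* bits above and stored big-endian,
 * e.g. for a hypothetical user-state, translation-enabled, 64-bit
 * problem-state element:
 *
 *	elem->flags = cpu_to_be64(CXL_PE_PROBLEM_STATE |
 *				  CXL_PE_USER_STATE |
 *				  CXL_PE_TRANSLATION_ENABLED |
 *				  CXL_PE_64_BIT);
 */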
52 | |||
53 | #define H_STATE_NORMAL 1 | ||
54 | #define H_STATE_DISABLE 2 | ||
55 | #define H_STATE_TEMP_UNAVAILABLE 3 | ||
56 | #define H_STATE_PERM_UNAVAILABLE 4 | ||
57 | |||
58 | /* NOTE: element must be a logical real address, and must be pinned */ | ||
59 | long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element, | ||
60 | u64 *process_token, u64 *mmio_addr, u64 *mmio_size); | ||
61 | |||
62 | /** | ||
63 | * cxl_h_detach_process - Detach a process element from a coherent | ||
64 | * platform function. | ||
65 | */ | ||
66 | long cxl_h_detach_process(u64 unit_address, u64 process_token); | ||
67 | |||
68 | /** | ||
69 | * cxl_h_reset_afu - Perform a reset of the coherent platform function. | ||
70 | */ | ||
71 | long cxl_h_reset_afu(u64 unit_address); | ||
72 | |||
73 | /** | ||
74 | * cxl_h_suspend_process - Suspend execution of a process | ||
75 | * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when | ||
76 | * process was attached. | ||
77 | */ | ||
78 | long cxl_h_suspend_process(u64 unit_address, u64 process_token); | ||
79 | |||
80 | /** | ||
81 | * cxl_h_resume_process - Resume execution of a process | ||
82 | * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when | ||
83 | * process was attached. | ||
84 | */ | ||
85 | long cxl_h_resume_process(u64 unit_address, u64 process_token); | ||
86 | |||
87 | /** | ||
88 | * cxl_h_read_error_state - Reads the error state of the coherent | ||
89 | * platform function. | ||
90 | * R4 contains the error state | ||
91 | */ | ||
92 | long cxl_h_read_error_state(u64 unit_address, u64 *state); | ||
93 | |||
94 | /** | ||
95 | * cxl_h_get_afu_err - collect the AFU error buffer | ||
96 | * Parameter1 = byte offset into error buffer to retrieve, valid values | ||
97 | * are between 0 and (ibm,error-buffer-size - 1) | ||
98 | * Parameter2 = 4K aligned real address of error buffer, to be filled in | ||
99 | * Parameter3 = length of error buffer, valid values are 4K or less | ||
100 | */ | ||
101 | long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len); | ||
102 | |||
103 | /** | ||
104 | * cxl_h_get_config - collect configuration record for the | ||
105 | * coherent platform function | ||
106 | * Parameter1 = # of configuration record to retrieve, valid values are | ||
107 | * between 0 and (ibm,#config-records - 1) | ||
108 | * Parameter2 = byte offset into configuration record to retrieve, | ||
109 | * valid values are between 0 and (ibm,config-record-size - 1) | ||
110 | * Parameter3 = 4K aligned real address of configuration record buffer, | ||
111 | * to be filled in | ||
112 | * Parameter4 = length of configuration buffer, valid values are 4K or less | ||
113 | */ | ||
114 | long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset, | ||
115 | u64 buf_address, u64 len); | ||
116 | |||
117 | /** | ||
118 | * cxl_h_terminate_process - Terminate the process before completion | ||
119 | * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when | ||
120 | * process was attached. | ||
121 | */ | ||
122 | long cxl_h_terminate_process(u64 unit_address, u64 process_token); | ||
123 | |||
124 | /** | ||
125 | * cxl_h_collect_vpd - Collect VPD for the coherent platform function. | ||
126 | * Parameter1 = # of VPD record to retrieve, valid values are between 0 | ||
127 | * and (ibm,#config-records - 1). | ||
128 | * Parameter2 = 4K naturally aligned real buffer containing block | ||
129 | * list entries | ||
130 | * Parameter3 = number of block list entries in the block list, valid | ||
131 | * values are between 0 and 256 | ||
132 | */ | ||
133 | long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address, | ||
134 | u64 num, u64 *out); | ||
135 | |||
136 | /** | ||
137 | * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt | ||
138 | */ | ||
139 | long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg); | ||
140 | |||
141 | /** | ||
142 | * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data | ||
143 | * based on an interrupt | ||
144 | * Parameter1 = value to write to the function-wide error interrupt register | ||
145 | */ | ||
146 | long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value); | ||
147 | |||
148 | /** | ||
149 | * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of | ||
150 | * an error log | ||
151 | */ | ||
152 | long cxl_h_get_error_log(u64 unit_address, u64 value); | ||
153 | |||
154 | /** | ||
155 | * cxl_h_collect_int_info - Collect interrupt info about a coherent | ||
156 | * platform function after an interrupt occurred. | ||
157 | */ | ||
158 | long cxl_h_collect_int_info(u64 unit_address, u64 process_token, | ||
159 | struct cxl_irq_info *info); | ||
160 | |||
161 | /** | ||
162 | * cxl_h_control_faults - Control the operation of a coherent platform | ||
163 | * function after a fault occurs. | ||
164 | * | ||
165 | * Parameters | ||
166 | * control-mask: value to control the faults | ||
167 | * looks like PSL_TFC_An shifted >> 32 | ||
168 | * reset-mask: mask to control reset of function faults | ||
169 | * Set reset_mask = 1 to reset PSL errors | ||
170 | */ | ||
171 | long cxl_h_control_faults(u64 unit_address, u64 process_token, | ||
172 | u64 control_mask, u64 reset_mask); | ||
173 | |||
174 | /** | ||
175 | * cxl_h_reset_adapter - Perform a reset of the coherent platform facility. | ||
176 | */ | ||
177 | long cxl_h_reset_adapter(u64 unit_address); | ||
178 | |||
179 | /** | ||
180 | * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility. | ||
181 | * Parameter1 = 4K naturally aligned real buffer containing block | ||
182 | * list entries | ||
183 | * Parameter2 = number of block list entries in the block list, valid | ||
184 | * values are between 0 and 256 | ||
185 | */ | ||
186 | long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address, | ||
187 | u64 num, u64 *out); | ||
188 | |||
189 | /** | ||
190 | * cxl_h_download_adapter_image - Download the base image to the coherent | ||
191 | * platform facility. | ||
192 | */ | ||
193 | long cxl_h_download_adapter_image(u64 unit_address, | ||
194 | u64 list_address, u64 num, | ||
195 | u64 *out); | ||
196 | |||
197 | /** | ||
198 | * cxl_h_validate_adapter_image - Validate the base image in the coherent | ||
199 | * platform facility. | ||
200 | */ | ||
201 | long cxl_h_validate_adapter_image(u64 unit_address, | ||
202 | u64 list_address, u64 num, | ||
203 | u64 *out); | ||
204 | #endif /* _HCALLS_H */ | ||
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 09a406058c46..be646dc41a2c 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -19,70 +19,11 @@ | |||
19 | #include "cxl.h" | 19 | #include "cxl.h" |
20 | #include "trace.h" | 20 | #include "trace.h" |
21 | 21 | ||
22 | /* XXX: This is implementation specific */ | 22 | static int afu_irq_range_start(void) |
23 | static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat) | ||
24 | { | 23 | { |
25 | u64 fir1, fir2, fir_slice, serr, afu_debug; | 24 | if (cpu_has_feature(CPU_FTR_HVMODE)) |
26 | 25 | return 1; | |
27 | fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); | 26 | return 0; |
28 | fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); | ||
29 | fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); | ||
30 | serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); | ||
31 | afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); | ||
32 | |||
33 | dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); | ||
34 | dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); | ||
35 | dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); | ||
36 | dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); | ||
37 | dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); | ||
38 | dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); | ||
39 | |||
40 | dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); | ||
41 | cxl_stop_trace(ctx->afu->adapter); | ||
42 | |||
43 | return cxl_ack_irq(ctx, 0, errstat); | ||
44 | } | ||
45 | |||
46 | irqreturn_t cxl_slice_irq_err(int irq, void *data) | ||
47 | { | ||
48 | struct cxl_afu *afu = data; | ||
49 | u64 fir_slice, errstat, serr, afu_debug; | ||
50 | |||
51 | WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); | ||
52 | |||
53 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
54 | fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); | ||
55 | errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); | ||
56 | afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); | ||
57 | dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); | ||
58 | dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); | ||
59 | dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); | ||
60 | dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); | ||
61 | |||
62 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | ||
63 | |||
64 | return IRQ_HANDLED; | ||
65 | } | ||
66 | |||
67 | static irqreturn_t cxl_irq_err(int irq, void *data) | ||
68 | { | ||
69 | struct cxl *adapter = data; | ||
70 | u64 fir1, fir2, err_ivte; | ||
71 | |||
72 | WARN(1, "CXL ERROR interrupt %i\n", irq); | ||
73 | |||
74 | err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); | ||
75 | dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); | ||
76 | |||
77 | dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); | ||
78 | cxl_stop_trace(adapter); | ||
79 | |||
80 | fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); | ||
81 | fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); | ||
82 | |||
83 | dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); | ||
84 | |||
85 | return IRQ_HANDLED; | ||
86 | } | 27 | } |
87 | 28 | ||
88 | static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar) | 29 | static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar) |
@@ -93,9 +34,8 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da | |||
93 | return IRQ_HANDLED; | 34 | return IRQ_HANDLED; |
94 | } | 35 | } |
95 | 36 | ||
96 | static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info) | 37 | irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) |
97 | { | 38 | { |
98 | struct cxl_context *ctx = data; | ||
99 | u64 dsisr, dar; | 39 | u64 dsisr, dar; |
100 | 40 | ||
101 | dsisr = irq_info->dsisr; | 41 | dsisr = irq_info->dsisr; |
@@ -145,7 +85,8 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info) | |||
145 | if (dsisr & CXL_PSL_DSISR_An_UR) | 85 | if (dsisr & CXL_PSL_DSISR_An_UR) |
146 | pr_devel("CXL interrupt: AURP PTE not found\n"); | 86 | pr_devel("CXL interrupt: AURP PTE not found\n"); |
147 | if (dsisr & CXL_PSL_DSISR_An_PE) | 87 | if (dsisr & CXL_PSL_DSISR_An_PE) |
148 | return handle_psl_slice_error(ctx, dsisr, irq_info->errstat); | 88 | return cxl_ops->handle_psl_slice_error(ctx, dsisr, |
89 | irq_info->errstat); | ||
149 | if (dsisr & CXL_PSL_DSISR_An_AE) { | 90 | if (dsisr & CXL_PSL_DSISR_An_AE) { |
150 | pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err); | 91 | pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err); |
151 | 92 | ||
@@ -169,7 +110,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info) | |||
169 | wake_up_all(&ctx->wq); | 110 | wake_up_all(&ctx->wq); |
170 | } | 111 | } |
171 | 112 | ||
172 | cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0); | 113 | cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0); |
173 | return IRQ_HANDLED; | 114 | return IRQ_HANDLED; |
174 | } | 115 | } |
175 | if (dsisr & CXL_PSL_DSISR_An_OC) | 116 | if (dsisr & CXL_PSL_DSISR_An_OC) |
@@ -179,54 +120,27 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info) | |||
179 | return IRQ_HANDLED; | 120 | return IRQ_HANDLED; |
180 | } | 121 | } |
181 | 122 | ||
182 | static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info) | ||
183 | { | ||
184 | if (irq_info->dsisr & CXL_PSL_DSISR_TRANS) | ||
185 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); | ||
186 | else | ||
187 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); | ||
188 | |||
189 | return IRQ_HANDLED; | ||
190 | } | ||
191 | |||
192 | static irqreturn_t cxl_irq_multiplexed(int irq, void *data) | ||
193 | { | ||
194 | struct cxl_afu *afu = data; | ||
195 | struct cxl_context *ctx; | ||
196 | struct cxl_irq_info irq_info; | ||
197 | int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff; | ||
198 | int ret; | ||
199 | |||
200 | if ((ret = cxl_get_irq(afu, &irq_info))) { | ||
201 | WARN(1, "Unable to get CXL IRQ Info: %i\n", ret); | ||
202 | return fail_psl_irq(afu, &irq_info); | ||
203 | } | ||
204 | |||
205 | rcu_read_lock(); | ||
206 | ctx = idr_find(&afu->contexts_idr, ph); | ||
207 | if (ctx) { | ||
208 | ret = cxl_irq(irq, ctx, &irq_info); | ||
209 | rcu_read_unlock(); | ||
210 | return ret; | ||
211 | } | ||
212 | rcu_read_unlock(); | ||
213 | |||
214 | WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR" | ||
215 | " %016llx\n(Possible AFU HW issue - was a term/remove acked" | ||
216 | " with outstanding transactions?)\n", ph, irq_info.dsisr, | ||
217 | irq_info.dar); | ||
218 | return fail_psl_irq(afu, &irq_info); | ||
219 | } | ||
220 | |||
221 | static irqreturn_t cxl_irq_afu(int irq, void *data) | 123 | static irqreturn_t cxl_irq_afu(int irq, void *data) |
222 | { | 124 | { |
223 | struct cxl_context *ctx = data; | 125 | struct cxl_context *ctx = data; |
224 | irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq)); | 126 | irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq)); |
225 | int irq_off, afu_irq = 1; | 127 | int irq_off, afu_irq = 0; |
226 | __u16 range; | 128 | __u16 range; |
227 | int r; | 129 | int r; |
228 | 130 | ||
229 | for (r = 1; r < CXL_IRQ_RANGES; r++) { | 131 | /* |
132 | * Look for the interrupt number. | ||
133 | * On bare-metal, we know range 0 only contains the PSL | ||
134 | * interrupt so we could start counting at range 1 and initialize | ||
135 | * afu_irq at 1. | ||
136 | * In a guest, range 0 also contains AFU interrupts, so it must | ||
137 | * be accounted for. Therefore we initialize afu_irq at 0 to take into | ||
138 | * account the PSL interrupt. | ||
139 | * | ||
140 | * For code-readability, it just seems easier to go over all | ||
141 | * the ranges on bare-metal and guest. The end result is the same. | ||
142 | */ | ||
143 | for (r = 0; r < CXL_IRQ_RANGES; r++) { | ||
230 | irq_off = hwirq - ctx->irqs.offset[r]; | 144 | irq_off = hwirq - ctx->irqs.offset[r]; |
231 | range = ctx->irqs.range[r]; | 145 | range = ctx->irqs.range[r]; |
232 | if (irq_off >= 0 && irq_off < range) { | 146 | if (irq_off >= 0 && irq_off < range) { |
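A worked example of the search above, with hypothetical values and
assuming the elided hit branch adds irq_off and breaks: with
ctx->irqs = { offset[0]=0x100, range[0]=1; offset[1]=0x200, range[1]=4 },
a guest hwirq of 0x202 misses range 0 (afu_irq becomes 1), then hits
range 1 at irq_off 2, giving afu_irq = 3.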
@@ -236,7 +150,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data) | |||
236 | afu_irq += range; | 150 | afu_irq += range; |
237 | } | 151 | } |
238 | if (unlikely(r >= CXL_IRQ_RANGES)) { | 152 | if (unlikely(r >= CXL_IRQ_RANGES)) { |
239 | WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n", | 153 | WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n", |
240 | ctx->pe, irq, hwirq); | 154 | ctx->pe, irq, hwirq); |
241 | return IRQ_HANDLED; | 155 | return IRQ_HANDLED; |
242 | } | 156 | } |
@@ -246,7 +160,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data) | |||
246 | afu_irq, ctx->pe, irq, hwirq); | 160 | afu_irq, ctx->pe, irq, hwirq); |
247 | 161 | ||
248 | if (unlikely(!ctx->irq_bitmap)) { | 162 | if (unlikely(!ctx->irq_bitmap)) { |
249 | WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n"); | 163 | WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n"); |
250 | return IRQ_HANDLED; | 164 | return IRQ_HANDLED; |
251 | } | 165 | } |
252 | spin_lock(&ctx->lock); | 166 | spin_lock(&ctx->lock); |
@@ -272,7 +186,8 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, | |||
272 | return 0; | 186 | return 0; |
273 | } | 187 | } |
274 | 188 | ||
275 | cxl_setup_irq(adapter, hwirq, virq); | 189 | if (cxl_ops->setup_irq) |
190 | cxl_ops->setup_irq(adapter, hwirq, virq); | ||
276 | 191 | ||
277 | pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq); | 192 | pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq); |
278 | 193 | ||
@@ -291,16 +206,16 @@ void cxl_unmap_irq(unsigned int virq, void *cookie) | |||
291 | irq_dispose_mapping(virq); | 206 | irq_dispose_mapping(virq); |
292 | } | 207 | } |
293 | 208 | ||
294 | static int cxl_register_one_irq(struct cxl *adapter, | 209 | int cxl_register_one_irq(struct cxl *adapter, |
295 | irq_handler_t handler, | 210 | irq_handler_t handler, |
296 | void *cookie, | 211 | void *cookie, |
297 | irq_hw_number_t *dest_hwirq, | 212 | irq_hw_number_t *dest_hwirq, |
298 | unsigned int *dest_virq, | 213 | unsigned int *dest_virq, |
299 | const char *name) | 214 | const char *name) |
300 | { | 215 | { |
301 | int hwirq, virq; | 216 | int hwirq, virq; |
302 | 217 | ||
303 | if ((hwirq = cxl_alloc_one_irq(adapter)) < 0) | 218 | if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0) |
304 | return hwirq; | 219 | return hwirq; |
305 | 220 | ||
306 | if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name))) | 221 | if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name))) |
@@ -312,108 +227,10 @@ static int cxl_register_one_irq(struct cxl *adapter, | |||
312 | return 0; | 227 | return 0; |
313 | 228 | ||
314 | err: | 229 | err: |
315 | cxl_release_one_irq(adapter, hwirq); | 230 | cxl_ops->release_one_irq(adapter, hwirq); |
316 | return -ENOMEM; | 231 | return -ENOMEM; |
317 | } | 232 | } |
318 | 233 | ||
319 | int cxl_register_psl_err_irq(struct cxl *adapter) | ||
320 | { | ||
321 | int rc; | ||
322 | |||
323 | adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", | ||
324 | dev_name(&adapter->dev)); | ||
325 | if (!adapter->irq_name) | ||
326 | return -ENOMEM; | ||
327 | |||
328 | if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter, | ||
329 | &adapter->err_hwirq, | ||
330 | &adapter->err_virq, | ||
331 | adapter->irq_name))) { | ||
332 | kfree(adapter->irq_name); | ||
333 | adapter->irq_name = NULL; | ||
334 | return rc; | ||
335 | } | ||
336 | |||
337 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff); | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | void cxl_release_psl_err_irq(struct cxl *adapter) | ||
343 | { | ||
344 | if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq)) | ||
345 | return; | ||
346 | |||
347 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); | ||
348 | cxl_unmap_irq(adapter->err_virq, adapter); | ||
349 | cxl_release_one_irq(adapter, adapter->err_hwirq); | ||
350 | kfree(adapter->irq_name); | ||
351 | } | ||
352 | |||
353 | int cxl_register_serr_irq(struct cxl_afu *afu) | ||
354 | { | ||
355 | u64 serr; | ||
356 | int rc; | ||
357 | |||
358 | afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", | ||
359 | dev_name(&afu->dev)); | ||
360 | if (!afu->err_irq_name) | ||
361 | return -ENOMEM; | ||
362 | |||
363 | if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu, | ||
364 | &afu->serr_hwirq, | ||
365 | &afu->serr_virq, afu->err_irq_name))) { | ||
366 | kfree(afu->err_irq_name); | ||
367 | afu->err_irq_name = NULL; | ||
368 | return rc; | ||
369 | } | ||
370 | |||
371 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
372 | serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); | ||
373 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | void cxl_release_serr_irq(struct cxl_afu *afu) | ||
379 | { | ||
380 | if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) | ||
381 | return; | ||
382 | |||
383 | cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); | ||
384 | cxl_unmap_irq(afu->serr_virq, afu); | ||
385 | cxl_release_one_irq(afu->adapter, afu->serr_hwirq); | ||
386 | kfree(afu->err_irq_name); | ||
387 | } | ||
388 | |||
389 | int cxl_register_psl_irq(struct cxl_afu *afu) | ||
390 | { | ||
391 | int rc; | ||
392 | |||
393 | afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", | ||
394 | dev_name(&afu->dev)); | ||
395 | if (!afu->psl_irq_name) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu, | ||
399 | &afu->psl_hwirq, &afu->psl_virq, | ||
400 | afu->psl_irq_name))) { | ||
401 | kfree(afu->psl_irq_name); | ||
402 | afu->psl_irq_name = NULL; | ||
403 | } | ||
404 | return rc; | ||
405 | } | ||
406 | |||
407 | void cxl_release_psl_irq(struct cxl_afu *afu) | ||
408 | { | ||
409 | if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq)) | ||
410 | return; | ||
411 | |||
412 | cxl_unmap_irq(afu->psl_virq, afu); | ||
413 | cxl_release_one_irq(afu->adapter, afu->psl_hwirq); | ||
414 | kfree(afu->psl_irq_name); | ||
415 | } | ||
416 | |||
417 | void afu_irq_name_free(struct cxl_context *ctx) | 234 | void afu_irq_name_free(struct cxl_context *ctx) |
418 | { | 235 | { |
419 | struct cxl_irq_name *irq_name, *tmp; | 236 | struct cxl_irq_name *irq_name, *tmp; |
@@ -429,16 +246,33 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count) | |||
429 | { | 246 | { |
430 | int rc, r, i, j = 1; | 247 | int rc, r, i, j = 1; |
431 | struct cxl_irq_name *irq_name; | 248 | struct cxl_irq_name *irq_name; |
249 | int alloc_count; | ||
250 | |||
251 | /* | ||
252 | * In native mode, range 0 is reserved for the multiplexed | ||
253 | * PSL interrupt. It has been allocated when the AFU was initialized. | ||
254 | * | ||
255 | * In a guest, the PSL interrupt is not multiplexed, but per-context, | ||
256 | * and is the first interrupt from range 0. It still needs to be | ||
257 | * allocated, so bump the count by one. | ||
258 | */ | ||
259 | if (cpu_has_feature(CPU_FTR_HVMODE)) | ||
260 | alloc_count = count; | ||
261 | else | ||
262 | alloc_count = count + 1; | ||
432 | 263 | ||
433 | /* Initialize the list head to hold irq names */ | 264 | /* Initialize the list head to hold irq names */ |
434 | INIT_LIST_HEAD(&ctx->irq_names); | 265 | INIT_LIST_HEAD(&ctx->irq_names); |
435 | 266 | ||
436 | if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count))) | 267 | if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, |
268 | alloc_count))) | ||
437 | return rc; | 269 | return rc; |
438 | 270 | ||
439 | /* Multiplexed PSL Interrupt */ | 271 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
440 | ctx->irqs.offset[0] = ctx->afu->psl_hwirq; | 272 | /* Multiplexed PSL Interrupt */ |
441 | ctx->irqs.range[0] = 1; | 273 | ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; |
274 | ctx->irqs.range[0] = 1; | ||
275 | } | ||
442 | 276 | ||
443 | ctx->irq_count = count; | 277 | ctx->irq_count = count; |
444 | ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count), | 278 | ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count), |
@@ -450,7 +284,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count) | |||
450 | * Allocate names first. If any fail, bail out before allocating | 284 | * Allocate names first. If any fail, bail out before allocating |
451 | * actual hardware IRQs. | 285 | * actual hardware IRQs. |
452 | */ | 286 | */ |
453 | for (r = 1; r < CXL_IRQ_RANGES; r++) { | 287 | for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { |
454 | for (i = 0; i < ctx->irqs.range[r]; i++) { | 288 | for (i = 0; i < ctx->irqs.range[r]; i++) { |
455 | irq_name = kmalloc(sizeof(struct cxl_irq_name), | 289 | irq_name = kmalloc(sizeof(struct cxl_irq_name), |
456 | GFP_KERNEL); | 290 | GFP_KERNEL); |
@@ -471,7 +305,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count) | |||
471 | return 0; | 305 | return 0; |
472 | 306 | ||
473 | out: | 307 | out: |
474 | cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); | 308 | cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); |
475 | afu_irq_name_free(ctx); | 309 | afu_irq_name_free(ctx); |
476 | return -ENOMEM; | 310 | return -ENOMEM; |
477 | } | 311 | } |
@@ -480,15 +314,30 @@ static void afu_register_hwirqs(struct cxl_context *ctx) | |||
480 | { | 314 | { |
481 | irq_hw_number_t hwirq; | 315 | irq_hw_number_t hwirq; |
482 | struct cxl_irq_name *irq_name; | 316 | struct cxl_irq_name *irq_name; |
483 | int r,i; | 317 | int r, i; |
318 | irqreturn_t (*handler)(int irq, void *data); | ||
484 | 319 | ||
485 | /* We've allocated all memory now, so let's do the irq allocations */ | 320 | /* We've allocated all memory now, so let's do the irq allocations */ |
486 | irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list); | 321 | irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list); |
487 | for (r = 1; r < CXL_IRQ_RANGES; r++) { | 322 | for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { |
488 | hwirq = ctx->irqs.offset[r]; | 323 | hwirq = ctx->irqs.offset[r]; |
489 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { | 324 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { |
490 | cxl_map_irq(ctx->afu->adapter, hwirq, | 325 | if (r == 0 && i == 0) |
491 | cxl_irq_afu, ctx, irq_name->name); | 326 | /* |
327 | * The very first interrupt of range 0 is | ||
328 | * always the PSL interrupt, but we only | ||
329 | * need to connect a handler for guests, | ||
330 | * because there's one PSL interrupt per | ||
331 | * context. | ||
332 | * On bare-metal, the PSL interrupt is | ||
333 | * multiplexed and was setup when the AFU | ||
334 | * was configured. | ||
335 | */ | ||
336 | handler = cxl_ops->psl_interrupt; | ||
337 | else | ||
338 | handler = cxl_irq_afu; | ||
339 | cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx, | ||
340 | irq_name->name); | ||
492 | irq_name = list_next_entry(irq_name, list); | 341 | irq_name = list_next_entry(irq_name, list); |
493 | } | 342 | } |
494 | } | 343 | } |
@@ -504,7 +353,7 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count) | |||
504 | 353 | ||
505 | afu_register_hwirqs(ctx); | 354 | afu_register_hwirqs(ctx); |
506 | return 0; | 355 | return 0; |
507 | } | 356 | } |
508 | 357 | ||
509 | void afu_release_irqs(struct cxl_context *ctx, void *cookie) | 358 | void afu_release_irqs(struct cxl_context *ctx, void *cookie) |
510 | { | 359 | { |
@@ -512,7 +361,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) | |||
512 | unsigned int virq; | 361 | unsigned int virq; |
513 | int r, i; | 362 | int r, i; |
514 | 363 | ||
515 | for (r = 1; r < CXL_IRQ_RANGES; r++) { | 364 | for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) { |
516 | hwirq = ctx->irqs.offset[r]; | 365 | hwirq = ctx->irqs.offset[r]; |
517 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { | 366 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { |
518 | virq = irq_find_mapping(NULL, hwirq); | 367 | virq = irq_find_mapping(NULL, hwirq); |
@@ -522,7 +371,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie) | |||
522 | } | 371 | } |
523 | 372 | ||
524 | afu_irq_name_free(ctx); | 373 | afu_irq_name_free(ctx); |
525 | cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); | 374 | cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter); |
526 | 375 | ||
527 | ctx->irq_count = 0; | 376 | ctx->irq_count = 0; |
528 | } | 377 | } |
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 9fde75ed4fac..ae68c3201156 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -32,6 +32,29 @@ uint cxl_verbose; | |||
32 | module_param_named(verbose, cxl_verbose, uint, 0600); | 32 | module_param_named(verbose, cxl_verbose, uint, 0600); |
33 | MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); | 33 | MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); |
34 | 34 | ||
35 | const struct cxl_backend_ops *cxl_ops; | ||
36 | |||
37 | int cxl_afu_slbia(struct cxl_afu *afu) | ||
38 | { | ||
39 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | ||
40 | |||
41 | pr_devel("cxl_afu_slbia issuing SLBIA command\n"); | ||
42 | cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); | ||
43 | while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { | ||
44 | if (time_after_eq(jiffies, timeout)) { | ||
45 | dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); | ||
46 | return -EBUSY; | ||
47 | } | ||
48 | /* If the adapter has gone down, we can assume that we | ||
49 | * will PERST it and that will invalidate everything. | ||
50 | */ | ||
51 | if (!cxl_ops->link_ok(afu->adapter, afu)) | ||
52 | return -EIO; | ||
53 | cpu_relax(); | ||
54 | } | ||
55 | return 0; | ||
56 | } | ||
57 | |||
35 | static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm) | 58 | static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm) |
36 | { | 59 | { |
37 | struct task_struct *task; | 60 | struct task_struct *task; |
@@ -139,6 +162,32 @@ int cxl_alloc_sst(struct cxl_context *ctx) | |||
139 | return 0; | 162 | return 0; |
140 | } | 163 | } |
141 | 164 | ||
165 | /* print buffer content as integers when debugging */ | ||
166 | void cxl_dump_debug_buffer(void *buf, size_t buf_len) | ||
167 | { | ||
168 | #ifdef DEBUG | ||
169 | int i, *ptr; | ||
170 | |||
171 | /* | ||
172 | * We want to group up to 4 integers per line, which means they | ||
173 | * need to be in the same pr_devel() statement | ||
174 | */ | ||
175 | ptr = (int *) buf; | ||
176 | for (i = 0; i * 4 < buf_len; i += 4) { | ||
177 | if ((i + 3) * 4 < buf_len) | ||
178 | pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1], | ||
179 | ptr[i + 2], ptr[i + 3]); | ||
180 | else if ((i + 2) * 4 < buf_len) | ||
181 | pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1], | ||
182 | ptr[i + 2]); | ||
183 | else if ((i + 1) * 4 < buf_len) | ||
184 | pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]); | ||
185 | else | ||
186 | pr_devel("%.8x\n", ptr[i]); | ||
187 | } | ||
188 | #endif /* DEBUG */ | ||
189 | } | ||
190 | |||
142 | /* Find a CXL adapter by its number and increase its refcount */ | 191 | /* Find a CXL adapter by its number and increase its refcount */
143 | struct cxl *get_cxl_adapter(int num) | 192 | struct cxl *get_cxl_adapter(int num) |
144 | { | 193 | { |
@@ -152,7 +201,7 @@ struct cxl *get_cxl_adapter(int num) | |||
152 | return adapter; | 201 | return adapter; |
153 | } | 202 | } |
154 | 203 | ||
155 | int cxl_alloc_adapter_nr(struct cxl *adapter) | 204 | static int cxl_alloc_adapter_nr(struct cxl *adapter) |
156 | { | 205 | { |
157 | int i; | 206 | int i; |
158 | 207 | ||
@@ -174,13 +223,58 @@ void cxl_remove_adapter_nr(struct cxl *adapter) | |||
174 | idr_remove(&cxl_adapter_idr, adapter->adapter_num); | 223 | idr_remove(&cxl_adapter_idr, adapter->adapter_num); |
175 | } | 224 | } |
176 | 225 | ||
226 | struct cxl *cxl_alloc_adapter(void) | ||
227 | { | ||
228 | struct cxl *adapter; | ||
229 | |||
230 | if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL))) | ||
231 | return NULL; | ||
232 | |||
233 | spin_lock_init(&adapter->afu_list_lock); | ||
234 | |||
235 | if (cxl_alloc_adapter_nr(adapter)) | ||
236 | goto err1; | ||
237 | |||
238 | if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) | ||
239 | goto err2; | ||
240 | |||
241 | return adapter; | ||
242 | |||
243 | err2: | ||
244 | cxl_remove_adapter_nr(adapter); | ||
245 | err1: | ||
246 | kfree(adapter); | ||
247 | return NULL; | ||
248 | } | ||
249 | |||
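cxl_alloc_adapter() uses the usual kernel goto-unwind convention: each error label releases exactly what was acquired before the failing step, in reverse order, so a failure at dev_set_name() still gives back the adapter number before freeing. A generic sketch of the idiom (the resource helpers are illustrative):

/* Illustrative unwind skeleton, not cxl code: each label frees only
 * what was successfully set up above its goto site. */
static struct thing *thing_alloc(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;
	if (get_resource_a(t))		/* hypothetical helper */
		goto err_free;
	if (get_resource_b(t))		/* hypothetical helper */
		goto err_put_a;
	return t;

err_put_a:
	put_resource_a(t);
err_free:
	kfree(t);
	return NULL;
}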
250 | struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) | ||
251 | { | ||
252 | struct cxl_afu *afu; | ||
253 | |||
254 | if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL))) | ||
255 | return NULL; | ||
256 | |||
257 | afu->adapter = adapter; | ||
258 | afu->dev.parent = &adapter->dev; | ||
259 | afu->dev.release = cxl_ops->release_afu; | ||
260 | afu->slice = slice; | ||
261 | idr_init(&afu->contexts_idr); | ||
262 | mutex_init(&afu->contexts_lock); | ||
263 | spin_lock_init(&afu->afu_cntl_lock); | ||
264 | |||
265 | afu->prefault_mode = CXL_PREFAULT_NONE; | ||
266 | afu->irqs_max = afu->adapter->user_irqs; | ||
267 | |||
268 | return afu; | ||
269 | } | ||
270 | |||
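Note that cxl_alloc_afu() wires afu->dev.release to the backend's release_afu hook, so once the struct device is registered, freeing the AFU belongs to the device core rather than to explicit kfree() calls. A hedged sketch of the resulting teardown flow (the call below illustrates the pattern and is not lifted from this diff):

/* Dropping the last reference ends up in cxl_ops->release_afu(),
 * which is where the AFU memory is expected to be freed. */
device_unregister(&afu->dev);	/* refcount -> 0 -> dev.release(&afu->dev) */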
177 | int cxl_afu_select_best_mode(struct cxl_afu *afu) | 271 | int cxl_afu_select_best_mode(struct cxl_afu *afu) |
178 | { | 272 | { |
179 | if (afu->modes_supported & CXL_MODE_DIRECTED) | 273 | if (afu->modes_supported & CXL_MODE_DIRECTED) |
180 | return cxl_afu_activate_mode(afu, CXL_MODE_DIRECTED); | 274 | return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED); |
181 | 275 | ||
182 | if (afu->modes_supported & CXL_MODE_DEDICATED) | 276 | if (afu->modes_supported & CXL_MODE_DEDICATED) |
183 | return cxl_afu_activate_mode(afu, CXL_MODE_DEDICATED); | 277 | return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED); |
184 | 278 | ||
185 | dev_warn(&afu->dev, "No supported programming modes available\n"); | 279 | dev_warn(&afu->dev, "No supported programming modes available\n"); |
186 | /* We don't fail this so the user can inspect sysfs */ | 280 | /* We don't fail this so the user can inspect sysfs */ |
@@ -191,9 +285,6 @@ static int __init init_cxl(void) | |||
191 | { | 285 | { |
192 | int rc = 0; | 286 | int rc = 0; |
193 | 287 | ||
194 | if (!cpu_has_feature(CPU_FTR_HVMODE)) | ||
195 | return -EPERM; | ||
196 | |||
197 | if ((rc = cxl_file_init())) | 288 | if ((rc = cxl_file_init())) |
198 | return rc; | 289 | return rc; |
199 | 290 | ||
@@ -202,7 +293,17 @@ static int __init init_cxl(void) | |||
202 | if ((rc = register_cxl_calls(&cxl_calls))) | 293 | if ((rc = register_cxl_calls(&cxl_calls))) |
203 | goto err; | 294 | goto err; |
204 | 295 | ||
205 | if ((rc = pci_register_driver(&cxl_pci_driver))) | 296 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
297 | cxl_ops = &cxl_native_ops; | ||
298 | rc = pci_register_driver(&cxl_pci_driver); | ||
299 | } | ||
300 | #ifdef CONFIG_PPC_PSERIES | ||
301 | else { | ||
302 | cxl_ops = &cxl_guest_ops; | ||
303 | rc = platform_driver_register(&cxl_of_driver); | ||
304 | } | ||
305 | #endif | ||
306 | if (rc) | ||
206 | goto err1; | 307 | goto err1; |
207 | 308 | ||
208 | return 0; | 309 | return 0; |
@@ -217,7 +318,12 @@ err: | |||
217 | 318 | ||
218 | static void exit_cxl(void) | 319 | static void exit_cxl(void) |
219 | { | 320 | { |
220 | pci_unregister_driver(&cxl_pci_driver); | 321 | if (cpu_has_feature(CPU_FTR_HVMODE)) |
322 | pci_unregister_driver(&cxl_pci_driver); | ||
323 | #ifdef CONFIG_PPC_PSERIES | ||
324 | else | ||
325 | platform_driver_unregister(&cxl_of_driver); | ||
326 | #endif | ||
221 | 327 | ||
222 | cxl_debugfs_exit(); | 328 | cxl_debugfs_exit(); |
223 | cxl_file_exit(); | 329 | cxl_file_exit(); |
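With this change init_cxl() selects the backend once at module load: bare metal (hypervisor mode) binds cxl_native_ops plus the PCI driver, while a pSeries guest binds cxl_guest_ops plus the new platform driver. Everything downstream then dispatches through the cxl_ops table, as the native.c conversions below show; a short sketch of the effect:

/* Sketch: generic code is backend-agnostic once cxl_ops is bound. */
static int reset_if_link_up(struct cxl *adapter, struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(adapter, afu))	/* native or guest check */
		return -EIO;
	return cxl_ops->afu_reset(afu);		/* dispatch to backend   */
}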
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index f40909793490..387fcbdf9793 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -42,7 +42,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, | |||
42 | goto out; | 42 | goto out; |
43 | } | 43 | } |
44 | 44 | ||
45 | if (!cxl_adapter_link_ok(afu->adapter)) { | 45 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
46 | afu->enabled = enabled; | 46 | afu->enabled = enabled; |
47 | rc = -EIO; | 47 | rc = -EIO; |
48 | goto out; | 48 | goto out; |
@@ -80,7 +80,7 @@ int cxl_afu_disable(struct cxl_afu *afu) | |||
80 | } | 80 | } |
81 | 81 | ||
82 | /* This will disable as well as reset */ | 82 | /* This will disable as well as reset */ |
83 | int __cxl_afu_reset(struct cxl_afu *afu) | 83 | static int native_afu_reset(struct cxl_afu *afu) |
84 | { | 84 | { |
85 | pr_devel("AFU reset request\n"); | 85 | pr_devel("AFU reset request\n"); |
86 | 86 | ||
@@ -90,9 +90,9 @@ int __cxl_afu_reset(struct cxl_afu *afu) | |||
90 | false); | 90 | false); |
91 | } | 91 | } |
92 | 92 | ||
93 | int cxl_afu_check_and_enable(struct cxl_afu *afu) | 93 | static int native_afu_check_and_enable(struct cxl_afu *afu) |
94 | { | 94 | { |
95 | if (!cxl_adapter_link_ok(afu->adapter)) { | 95 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
96 | WARN(1, "Refusing to enable afu while link down!\n"); | 96 | WARN(1, "Refusing to enable afu while link down!\n"); |
97 | return -EIO; | 97 | return -EIO; |
98 | } | 98 | } |
@@ -114,7 +114,7 @@ int cxl_psl_purge(struct cxl_afu *afu) | |||
114 | 114 | ||
115 | pr_devel("PSL purge request\n"); | 115 | pr_devel("PSL purge request\n"); |
116 | 116 | ||
117 | if (!cxl_adapter_link_ok(afu->adapter)) { | 117 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
118 | dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); | 118 | dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); |
119 | rc = -EIO; | 119 | rc = -EIO; |
120 | goto out; | 120 | goto out; |
@@ -136,7 +136,7 @@ int cxl_psl_purge(struct cxl_afu *afu) | |||
136 | rc = -EBUSY; | 136 | rc = -EBUSY; |
137 | goto out; | 137 | goto out; |
138 | } | 138 | } |
139 | if (!cxl_adapter_link_ok(afu->adapter)) { | 139 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
140 | rc = -EIO; | 140 | rc = -EIO; |
141 | goto out; | 141 | goto out; |
142 | } | 142 | } |
@@ -186,22 +186,22 @@ static int spa_max_procs(int spa_size) | |||
186 | int cxl_alloc_spa(struct cxl_afu *afu) | 186 | int cxl_alloc_spa(struct cxl_afu *afu) |
187 | { | 187 | { |
188 | /* Work out how many pages to allocate */ | 188 | /* Work out how many pages to allocate */ |
189 | afu->spa_order = 0; | 189 | afu->native->spa_order = 0; |
190 | do { | 190 | do { |
191 | afu->spa_order++; | 191 | afu->native->spa_order++; |
192 | afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE; | 192 | afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; |
193 | afu->spa_max_procs = spa_max_procs(afu->spa_size); | 193 | afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size); |
194 | } while (afu->spa_max_procs < afu->num_procs); | 194 | } while (afu->native->spa_max_procs < afu->num_procs); |
195 | 195 | ||
196 | WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */ | 196 | WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */ |
197 | 197 | ||
198 | if (!(afu->spa = (struct cxl_process_element *) | 198 | if (!(afu->native->spa = (struct cxl_process_element *) |
199 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) { | 199 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) { |
200 | pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n"); | 200 | pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n"); |
201 | return -ENOMEM; | 201 | return -ENOMEM; |
202 | } | 202 | } |
203 | pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", | 203 | pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", |
204 | 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs); | 204 | 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs); |
205 | 205 | ||
206 | return 0; | 206 | return 0; |
207 | } | 207 | } |
@@ -210,13 +210,15 @@ static void attach_spa(struct cxl_afu *afu) | |||
210 | { | 210 | { |
211 | u64 spap; | 211 | u64 spap; |
212 | 212 | ||
213 | afu->sw_command_status = (__be64 *)((char *)afu->spa + | 213 | afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa + |
214 | ((afu->spa_max_procs + 3) * 128)); | 214 | ((afu->native->spa_max_procs + 3) * 128)); |
215 | 215 | ||
216 | spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr; | 216 | spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr; |
217 | spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; | 217 | spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; |
218 | spap |= CXL_PSL_SPAP_V; | 218 | spap |= CXL_PSL_SPAP_V; |
219 | pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap); | 219 | pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", |
220 | afu->native->spa, afu->native->spa_max_procs, | ||
221 | afu->native->sw_command_status, spap); | ||
220 | cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap); | 222 | cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap); |
221 | } | 223 | } |
222 | 224 | ||
@@ -227,9 +229,10 @@ static inline void detach_spa(struct cxl_afu *afu) | |||
227 | 229 | ||
228 | void cxl_release_spa(struct cxl_afu *afu) | 230 | void cxl_release_spa(struct cxl_afu *afu) |
229 | { | 231 | { |
230 | if (afu->spa) { | 232 | if (afu->native->spa) { |
231 | free_pages((unsigned long) afu->spa, afu->spa_order); | 233 | free_pages((unsigned long) afu->native->spa, |
232 | afu->spa = NULL; | 234 | afu->native->spa_order); |
235 | afu->native->spa = NULL; | ||
233 | } | 236 | } |
234 | } | 237 | } |
235 | 238 | ||
@@ -247,7 +250,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter) | |||
247 | dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); | 250 | dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); |
248 | return -EBUSY; | 251 | return -EBUSY; |
249 | } | 252 | } |
250 | if (!cxl_adapter_link_ok(adapter)) | 253 | if (!cxl_ops->link_ok(adapter, NULL)) |
251 | return -EIO; | 254 | return -EIO; |
252 | cpu_relax(); | 255 | cpu_relax(); |
253 | } | 256 | } |
@@ -258,28 +261,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter) | |||
258 | dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); | 261 | dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); |
259 | return -EBUSY; | 262 | return -EBUSY; |
260 | } | 263 | } |
261 | if (!cxl_adapter_link_ok(adapter)) | 264 | if (!cxl_ops->link_ok(adapter, NULL)) |
262 | return -EIO; | ||
263 | cpu_relax(); | ||
264 | } | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | int cxl_afu_slbia(struct cxl_afu *afu) | ||
269 | { | ||
270 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | ||
271 | |||
272 | pr_devel("cxl_afu_slbia issuing SLBIA command\n"); | ||
273 | cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); | ||
274 | while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { | ||
275 | if (time_after_eq(jiffies, timeout)) { | ||
276 | dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); | ||
277 | return -EBUSY; | ||
278 | } | ||
279 | /* If the adapter has gone down, we can assume that we | ||
280 | * will PERST it and that will invalidate everything. | ||
281 | */ | ||
282 | if (!cxl_adapter_link_ok(afu->adapter)) | ||
283 | return -EIO; | 265 | return -EIO; |
284 | cpu_relax(); | 266 | cpu_relax(); |
285 | } | 267 | } |
@@ -312,7 +294,7 @@ static void slb_invalid(struct cxl_context *ctx) | |||
312 | struct cxl *adapter = ctx->afu->adapter; | 294 | struct cxl *adapter = ctx->afu->adapter; |
313 | u64 slbia; | 295 | u64 slbia; |
314 | 296 | ||
315 | WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex)); | 297 | WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex)); |
316 | 298 | ||
317 | cxl_p1_write(adapter, CXL_PSL_LBISEL, | 299 | cxl_p1_write(adapter, CXL_PSL_LBISEL, |
318 | ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | | 300 | ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | |
@@ -320,7 +302,7 @@ static void slb_invalid(struct cxl_context *ctx) | |||
320 | cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID); | 302 | cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID); |
321 | 303 | ||
322 | while (1) { | 304 | while (1) { |
323 | if (!cxl_adapter_link_ok(adapter)) | 305 | if (!cxl_ops->link_ok(adapter, NULL)) |
324 | break; | 306 | break; |
325 | slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA); | 307 | slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA); |
326 | if (!(slbia & CXL_TLB_SLB_P)) | 308 | if (!(slbia & CXL_TLB_SLB_P)) |
@@ -342,7 +324,7 @@ static int do_process_element_cmd(struct cxl_context *ctx, | |||
342 | 324 | ||
343 | ctx->elem->software_state = cpu_to_be32(pe_state); | 325 | ctx->elem->software_state = cpu_to_be32(pe_state); |
344 | smp_wmb(); | 326 | smp_wmb(); |
345 | *(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); | 327 | *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); |
346 | smp_mb(); | 328 | smp_mb(); |
347 | cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); | 329 | cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); |
348 | while (1) { | 330 | while (1) { |
@@ -351,12 +333,12 @@ static int do_process_element_cmd(struct cxl_context *ctx, | |||
351 | rc = -EBUSY; | 333 | rc = -EBUSY; |
352 | goto out; | 334 | goto out; |
353 | } | 335 | } |
354 | if (!cxl_adapter_link_ok(ctx->afu->adapter)) { | 336 | if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { |
355 | dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n"); | 337 | dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n"); |
356 | rc = -EIO; | 338 | rc = -EIO; |
357 | goto out; | 339 | goto out; |
358 | } | 340 | } |
359 | state = be64_to_cpup(ctx->afu->sw_command_status); | 341 | state = be64_to_cpup(ctx->afu->native->sw_command_status); |
360 | if (state == ~0ULL) { | 342 | if (state == ~0ULL) { |
361 | pr_err("cxl: Error adding process element to AFU\n"); | 343 | pr_err("cxl: Error adding process element to AFU\n"); |
362 | rc = -1; | 344 | rc = -1; |
@@ -384,12 +366,12 @@ static int add_process_element(struct cxl_context *ctx) | |||
384 | { | 366 | { |
385 | int rc = 0; | 367 | int rc = 0; |
386 | 368 | ||
387 | mutex_lock(&ctx->afu->spa_mutex); | 369 | mutex_lock(&ctx->afu->native->spa_mutex); |
388 | pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); | 370 | pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); |
389 | if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) | 371 | if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) |
390 | ctx->pe_inserted = true; | 372 | ctx->pe_inserted = true; |
391 | pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); | 373 | pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); |
392 | mutex_unlock(&ctx->afu->spa_mutex); | 374 | mutex_unlock(&ctx->afu->native->spa_mutex); |
393 | return rc; | 375 | return rc; |
394 | } | 376 | } |
395 | 377 | ||
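The spa_mutex serialises process-element commands, while do_process_element_cmd() itself relies on two barriers: smp_wmb() orders the element fields before the command word becomes visible, and smp_mb() orders the command word before the MMIO doorbell. A hedged sketch of that publish-then-doorbell sequence (sw_status and write_llcmd_reg() are illustrative names):

/* Hedged ordering sketch for the PE command path above. */
static void publish_pe_cmd(struct cxl_process_element *elem,
			   __be64 *sw_status, u64 cmd, u64 pe)
{
	elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
	smp_wmb();				/* fields before command   */
	*sw_status = cpu_to_be64(cmd | pe);	/* publish the command     */
	smp_mb();				/* command before doorbell */
	write_llcmd_reg(cmd | pe);		/* illustrative MMIO kick  */
}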
@@ -401,18 +383,18 @@ static int terminate_process_element(struct cxl_context *ctx) | |||
401 | if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) | 383 | if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) |
402 | return rc; | 384 | return rc; |
403 | 385 | ||
404 | mutex_lock(&ctx->afu->spa_mutex); | 386 | mutex_lock(&ctx->afu->native->spa_mutex); |
405 | pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); | 387 | pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); |
406 | /* We could be asked to terminate when the hw is down. That | 388 | /* We could be asked to terminate when the hw is down. That |
407 | * should always succeed: it's not running if the hw has gone | 389 | * should always succeed: it's not running if the hw has gone |
408 | * away and is being reset. | 390 | * away and is being reset. |
409 | */ | 391 | */ |
410 | if (cxl_adapter_link_ok(ctx->afu->adapter)) | 392 | if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) |
411 | rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, | 393 | rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, |
412 | CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T); | 394 | CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T); |
413 | ctx->elem->software_state = 0; /* Remove Valid bit */ | 395 | ctx->elem->software_state = 0; /* Remove Valid bit */ |
414 | pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); | 396 | pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); |
415 | mutex_unlock(&ctx->afu->spa_mutex); | 397 | mutex_unlock(&ctx->afu->native->spa_mutex); |
416 | return rc; | 398 | return rc; |
417 | } | 399 | } |
418 | 400 | ||
@@ -420,20 +402,20 @@ static int remove_process_element(struct cxl_context *ctx) | |||
420 | { | 402 | { |
421 | int rc = 0; | 403 | int rc = 0; |
422 | 404 | ||
423 | mutex_lock(&ctx->afu->spa_mutex); | 405 | mutex_lock(&ctx->afu->native->spa_mutex); |
424 | pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); | 406 | pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); |
425 | 407 | ||
426 | /* We could be asked to remove when the hw is down. Again, if | 408 | /* We could be asked to remove when the hw is down. Again, if |
427 | * the hw is down, the PE is gone, so we succeed. | 409 | * the hw is down, the PE is gone, so we succeed. |
428 | */ | 410 | */ |
429 | if (cxl_adapter_link_ok(ctx->afu->adapter)) | 411 | if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) |
430 | rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0); | 412 | rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0); |
431 | 413 | ||
432 | if (!rc) | 414 | if (!rc) |
433 | ctx->pe_inserted = false; | 415 | ctx->pe_inserted = false; |
434 | slb_invalid(ctx); | 416 | slb_invalid(ctx); |
435 | pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); | 417 | pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); |
436 | mutex_unlock(&ctx->afu->spa_mutex); | 418 | mutex_unlock(&ctx->afu->native->spa_mutex); |
437 | 419 | ||
438 | return rc; | 420 | return rc; |
439 | } | 421 | } |
@@ -446,7 +428,7 @@ void cxl_assign_psn_space(struct cxl_context *ctx) | |||
446 | ctx->psn_size = ctx->afu->adapter->ps_size; | 428 | ctx->psn_size = ctx->afu->adapter->ps_size; |
447 | } else { | 429 | } else { |
448 | ctx->psn_phys = ctx->afu->psn_phys + | 430 | ctx->psn_phys = ctx->afu->psn_phys + |
449 | (ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe); | 431 | (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe); |
450 | ctx->psn_size = ctx->afu->pp_size; | 432 | ctx->psn_size = ctx->afu->pp_size; |
451 | } | 433 | } |
452 | } | 434 | } |
@@ -458,7 +440,7 @@ static int activate_afu_directed(struct cxl_afu *afu) | |||
458 | dev_info(&afu->dev, "Activating AFU directed mode\n"); | 440 | dev_info(&afu->dev, "Activating AFU directed mode\n"); |
459 | 441 | ||
460 | afu->num_procs = afu->max_procs_virtualised; | 442 | afu->num_procs = afu->max_procs_virtualised; |
461 | if (afu->spa == NULL) { | 443 | if (afu->native->spa == NULL) { |
462 | if (cxl_alloc_spa(afu)) | 444 | if (cxl_alloc_spa(afu)) |
463 | return -ENOMEM; | 445 | return -ENOMEM; |
464 | } | 446 | } |
@@ -552,7 +534,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) | |||
552 | ctx->elem->common.wed = cpu_to_be64(wed); | 534 | ctx->elem->common.wed = cpu_to_be64(wed); |
553 | 535 | ||
554 | /* first guy needs to enable */ | 536 | /* first guy needs to enable */ |
555 | if ((result = cxl_afu_check_and_enable(ctx->afu))) | 537 | if ((result = cxl_ops->afu_check_and_enable(ctx->afu))) |
556 | return result; | 538 | return result; |
557 | 539 | ||
558 | return add_process_element(ctx); | 540 | return add_process_element(ctx); |
@@ -568,7 +550,7 @@ static int deactivate_afu_directed(struct cxl_afu *afu) | |||
568 | cxl_sysfs_afu_m_remove(afu); | 550 | cxl_sysfs_afu_m_remove(afu); |
569 | cxl_chardev_afu_remove(afu); | 551 | cxl_chardev_afu_remove(afu); |
570 | 552 | ||
571 | __cxl_afu_reset(afu); | 553 | cxl_ops->afu_reset(afu); |
572 | cxl_afu_disable(afu); | 554 | cxl_afu_disable(afu); |
573 | cxl_psl_purge(afu); | 555 | cxl_psl_purge(afu); |
574 | 556 | ||
@@ -632,7 +614,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) | |||
632 | /* master only context for dedicated */ | 614 | /* master only context for dedicated */ |
633 | cxl_assign_psn_space(ctx); | 615 | cxl_assign_psn_space(ctx); |
634 | 616 | ||
635 | if ((rc = __cxl_afu_reset(afu))) | 617 | if ((rc = cxl_ops->afu_reset(afu))) |
636 | return rc; | 618 | return rc; |
637 | 619 | ||
638 | cxl_p2n_write(afu, CXL_PSL_WED_An, wed); | 620 | cxl_p2n_write(afu, CXL_PSL_WED_An, wed); |
@@ -652,7 +634,7 @@ static int deactivate_dedicated_process(struct cxl_afu *afu) | |||
652 | return 0; | 634 | return 0; |
653 | } | 635 | } |
654 | 636 | ||
655 | int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode) | 637 | static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode) |
656 | { | 638 | { |
657 | if (mode == CXL_MODE_DIRECTED) | 639 | if (mode == CXL_MODE_DIRECTED) |
658 | return deactivate_afu_directed(afu); | 640 | return deactivate_afu_directed(afu); |
@@ -661,19 +643,14 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode) | |||
661 | return 0; | 643 | return 0; |
662 | } | 644 | } |
663 | 645 | ||
664 | int cxl_afu_deactivate_mode(struct cxl_afu *afu) | 646 | static int native_afu_activate_mode(struct cxl_afu *afu, int mode) |
665 | { | ||
666 | return _cxl_afu_deactivate_mode(afu, afu->current_mode); | ||
667 | } | ||
668 | |||
669 | int cxl_afu_activate_mode(struct cxl_afu *afu, int mode) | ||
670 | { | 647 | { |
671 | if (!mode) | 648 | if (!mode) |
672 | return 0; | 649 | return 0; |
673 | if (!(mode & afu->modes_supported)) | 650 | if (!(mode & afu->modes_supported)) |
674 | return -EINVAL; | 651 | return -EINVAL; |
675 | 652 | ||
676 | if (!cxl_adapter_link_ok(afu->adapter)) { | 653 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
677 | WARN(1, "Device link is down, refusing to activate!\n"); | 654 | WARN(1, "Device link is down, refusing to activate!\n"); |
678 | return -EIO; | 655 | return -EIO; |
679 | } | 656 | } |
@@ -686,9 +663,10 @@ int cxl_afu_activate_mode(struct cxl_afu *afu, int mode) | |||
686 | return -EINVAL; | 663 | return -EINVAL; |
687 | } | 664 | } |
688 | 665 | ||
689 | int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) | 666 | static int native_attach_process(struct cxl_context *ctx, bool kernel, |
667 | u64 wed, u64 amr) | ||
690 | { | 668 | { |
691 | if (!cxl_adapter_link_ok(ctx->afu->adapter)) { | 669 | if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { |
692 | WARN(1, "Device link is down, refusing to attach process!\n"); | 670 | WARN(1, "Device link is down, refusing to attach process!\n"); |
693 | return -EIO; | 671 | return -EIO; |
694 | } | 672 | } |
@@ -705,7 +683,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) | |||
705 | 683 | ||
706 | static inline int detach_process_native_dedicated(struct cxl_context *ctx) | 684 | static inline int detach_process_native_dedicated(struct cxl_context *ctx) |
707 | { | 685 | { |
708 | __cxl_afu_reset(ctx->afu); | 686 | cxl_ops->afu_reset(ctx->afu); |
709 | cxl_afu_disable(ctx->afu); | 687 | cxl_afu_disable(ctx->afu); |
710 | cxl_psl_purge(ctx->afu); | 688 | cxl_psl_purge(ctx->afu); |
711 | return 0; | 689 | return 0; |
@@ -723,7 +701,7 @@ static inline int detach_process_native_afu_directed(struct cxl_context *ctx) | |||
723 | return 0; | 701 | return 0; |
724 | } | 702 | } |
725 | 703 | ||
726 | int cxl_detach_process(struct cxl_context *ctx) | 704 | static int native_detach_process(struct cxl_context *ctx) |
727 | { | 705 | { |
728 | trace_cxl_detach(ctx); | 706 | trace_cxl_detach(ctx); |
729 | 707 | ||
@@ -733,14 +711,14 @@ int cxl_detach_process(struct cxl_context *ctx) | |||
733 | return detach_process_native_afu_directed(ctx); | 711 | return detach_process_native_afu_directed(ctx); |
734 | } | 712 | } |
735 | 713 | ||
736 | int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info) | 714 | static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) |
737 | { | 715 | { |
738 | u64 pidtid; | 716 | u64 pidtid; |
739 | 717 | ||
740 | /* If the adapter has gone away, we can't get any meaningful | 718 | /* If the adapter has gone away, we can't get any meaningful |
741 | * information. | 719 | * information. |
742 | */ | 720 | */ |
743 | if (!cxl_adapter_link_ok(afu->adapter)) | 721 | if (!cxl_ops->link_ok(afu->adapter, afu)) |
744 | return -EIO; | 722 | return -EIO; |
745 | 723 | ||
746 | info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); | 724 | info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); |
@@ -751,10 +729,214 @@ int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info) | |||
751 | info->tid = pidtid & 0xffffffff; | 729 | info->tid = pidtid & 0xffffffff; |
752 | info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); | 730 | info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); |
753 | info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); | 731 | info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); |
732 | info->proc_handle = 0; | ||
733 | |||
734 | return 0; | ||
735 | } | ||
736 | |||
737 | static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, | ||
738 | u64 dsisr, u64 errstat) | ||
739 | { | ||
740 | u64 fir1, fir2, fir_slice, serr, afu_debug; | ||
741 | |||
742 | fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); | ||
743 | fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); | ||
744 | fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); | ||
745 | serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); | ||
746 | afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); | ||
747 | |||
748 | dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); | ||
749 | dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); | ||
750 | dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); | ||
751 | dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); | ||
752 | dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); | ||
753 | dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); | ||
754 | |||
755 | dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); | ||
756 | cxl_stop_trace(ctx->afu->adapter); | ||
757 | |||
758 | return cxl_ops->ack_irq(ctx, 0, errstat); | ||
759 | } | ||
760 | |||
761 | static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info) | ||
762 | { | ||
763 | if (irq_info->dsisr & CXL_PSL_DSISR_TRANS) | ||
764 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); | ||
765 | else | ||
766 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); | ||
767 | |||
768 | return IRQ_HANDLED; | ||
769 | } | ||
770 | |||
771 | static irqreturn_t native_irq_multiplexed(int irq, void *data) | ||
772 | { | ||
773 | struct cxl_afu *afu = data; | ||
774 | struct cxl_context *ctx; | ||
775 | struct cxl_irq_info irq_info; | ||
776 | int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff; | ||
777 | int ret; | ||
778 | |||
779 | if ((ret = native_get_irq_info(afu, &irq_info))) { | ||
780 | WARN(1, "Unable to get CXL IRQ Info: %i\n", ret); | ||
781 | return fail_psl_irq(afu, &irq_info); | ||
782 | } | ||
783 | |||
784 | rcu_read_lock(); | ||
785 | ctx = idr_find(&afu->contexts_idr, ph); | ||
786 | if (ctx) { | ||
787 | ret = cxl_irq(irq, ctx, &irq_info); | ||
788 | rcu_read_unlock(); | ||
789 | return ret; | ||
790 | } | ||
791 | rcu_read_unlock(); | ||
792 | |||
793 | WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR" | ||
794 | " %016llx\n(Possible AFU HW issue - was a term/remove acked" | ||
795 | " with outstanding transactions?)\n", ph, irq_info.dsisr, | ||
796 | irq_info.dar); | ||
797 | return fail_psl_irq(afu, &irq_info); | ||
798 | } | ||
799 | |||
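native_irq_multiplexed() demultiplexes the shared PSL interrupt by reading the process-element handle from CXL_PSL_PEHandle_An and resolving it to a context. The lookup side needs only rcu_read_lock() because the contexts IDR is mutated under the contexts mutex elsewhere; a sketch of the reader pattern:

/* Reader-side sketch; handle_ctx_irq() is an illustrative stand-in
 * for cxl_irq() with its irq_info plumbing. */
static irqreturn_t demux_to_context(struct cxl_afu *afu, int ph)
{
	struct cxl_context *ctx;
	irqreturn_t ret = IRQ_NONE;

	rcu_read_lock();			/* pairs with IDR updates */
	ctx = idr_find(&afu->contexts_idr, ph);	/* handle -> context      */
	if (ctx)
		ret = handle_ctx_irq(ctx);	/* valid under read lock  */
	rcu_read_unlock();
	return ret;
}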
800 | static irqreturn_t native_slice_irq_err(int irq, void *data) | ||
801 | { | ||
802 | struct cxl_afu *afu = data; | ||
803 | u64 fir_slice, errstat, serr, afu_debug; | ||
804 | |||
805 | WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); | ||
806 | |||
807 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
808 | fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); | ||
809 | errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); | ||
810 | afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); | ||
811 | dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); | ||
812 | dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); | ||
813 | dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); | ||
814 | dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); | ||
815 | |||
816 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | ||
817 | |||
818 | return IRQ_HANDLED; | ||
819 | } | ||
820 | |||
821 | static irqreturn_t native_irq_err(int irq, void *data) | ||
822 | { | ||
823 | struct cxl *adapter = data; | ||
824 | u64 fir1, fir2, err_ivte; | ||
825 | |||
826 | WARN(1, "CXL ERROR interrupt %i\n", irq); | ||
827 | |||
828 | err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); | ||
829 | dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); | ||
830 | |||
831 | dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); | ||
832 | cxl_stop_trace(adapter); | ||
833 | |||
834 | fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); | ||
835 | fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); | ||
836 | |||
837 | dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); | ||
838 | |||
839 | return IRQ_HANDLED; | ||
840 | } | ||
841 | |||
842 | int cxl_native_register_psl_err_irq(struct cxl *adapter) | ||
843 | { | ||
844 | int rc; | ||
845 | |||
846 | adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", | ||
847 | dev_name(&adapter->dev)); | ||
848 | if (!adapter->irq_name) | ||
849 | return -ENOMEM; | ||
850 | |||
851 | if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter, | ||
852 | &adapter->native->err_hwirq, | ||
853 | &adapter->native->err_virq, | ||
854 | adapter->irq_name))) { | ||
855 | kfree(adapter->irq_name); | ||
856 | adapter->irq_name = NULL; | ||
857 | return rc; | ||
858 | } | ||
859 | |||
860 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff); | ||
861 | |||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | void cxl_native_release_psl_err_irq(struct cxl *adapter) | ||
866 | { | ||
867 | if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) | ||
868 | return; | ||
869 | |||
870 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); | ||
871 | cxl_unmap_irq(adapter->native->err_virq, adapter); | ||
872 | cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); | ||
873 | kfree(adapter->irq_name); | ||
874 | } | ||
875 | |||
876 | int cxl_native_register_serr_irq(struct cxl_afu *afu) | ||
877 | { | ||
878 | u64 serr; | ||
879 | int rc; | ||
880 | |||
881 | afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", | ||
882 | dev_name(&afu->dev)); | ||
883 | if (!afu->err_irq_name) | ||
884 | return -ENOMEM; | ||
885 | |||
886 | if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu, | ||
887 | &afu->serr_hwirq, | ||
888 | &afu->serr_virq, afu->err_irq_name))) { | ||
889 | kfree(afu->err_irq_name); | ||
890 | afu->err_irq_name = NULL; | ||
891 | return rc; | ||
892 | } | ||
893 | |||
894 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
895 | serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); | ||
896 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | ||
754 | 897 | ||
755 | return 0; | 898 | return 0; |
756 | } | 899 | } |
757 | 900 | ||
901 | void cxl_native_release_serr_irq(struct cxl_afu *afu) | ||
902 | { | ||
903 | if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) | ||
904 | return; | ||
905 | |||
906 | cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); | ||
907 | cxl_unmap_irq(afu->serr_virq, afu); | ||
908 | cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); | ||
909 | kfree(afu->err_irq_name); | ||
910 | } | ||
911 | |||
912 | int cxl_native_register_psl_irq(struct cxl_afu *afu) | ||
913 | { | ||
914 | int rc; | ||
915 | |||
916 | afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", | ||
917 | dev_name(&afu->dev)); | ||
918 | if (!afu->psl_irq_name) | ||
919 | return -ENOMEM; | ||
920 | |||
921 | if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, | ||
922 | afu, &afu->native->psl_hwirq, &afu->native->psl_virq, | ||
923 | afu->psl_irq_name))) { | ||
924 | kfree(afu->psl_irq_name); | ||
925 | afu->psl_irq_name = NULL; | ||
926 | } | ||
927 | return rc; | ||
928 | } | ||
929 | |||
930 | void cxl_native_release_psl_irq(struct cxl_afu *afu) | ||
931 | { | ||
932 | if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) | ||
933 | return; | ||
934 | |||
935 | cxl_unmap_irq(afu->native->psl_virq, afu); | ||
936 | cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); | ||
937 | kfree(afu->psl_irq_name); | ||
938 | } | ||
939 | |||
758 | static void recover_psl_err(struct cxl_afu *afu, u64 errstat) | 940 | static void recover_psl_err(struct cxl_afu *afu, u64 errstat) |
759 | { | 941 | { |
760 | u64 dsisr; | 942 | u64 dsisr; |
@@ -769,7 +951,7 @@ static void recover_psl_err(struct cxl_afu *afu, u64 errstat) | |||
769 | cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); | 951 | cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); |
770 | } | 952 | } |
771 | 953 | ||
772 | int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) | 954 | static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) |
773 | { | 955 | { |
774 | trace_cxl_psl_irq_ack(ctx, tfc); | 956 | trace_cxl_psl_irq_ack(ctx, tfc); |
775 | if (tfc) | 957 | if (tfc) |
@@ -784,3 +966,132 @@ int cxl_check_error(struct cxl_afu *afu) | |||
784 | { | 966 | { |
785 | return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); | 967 | return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); |
786 | } | 968 | } |
969 | |||
970 | static bool native_support_attributes(const char *attr_name, | ||
971 | enum cxl_attrs type) | ||
972 | { | ||
973 | return true; | ||
974 | } | ||
975 | |||
976 | static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out) | ||
977 | { | ||
978 | if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) | ||
979 | return -EIO; | ||
980 | if (unlikely(off >= afu->crs_len)) | ||
981 | return -ERANGE; | ||
982 | *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset + | ||
983 | (cr * afu->crs_len) + off); | ||
984 | return 0; | ||
985 | } | ||
986 | |||
987 | static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out) | ||
988 | { | ||
989 | if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) | ||
990 | return -EIO; | ||
991 | if (unlikely(off >= afu->crs_len)) | ||
992 | return -ERANGE; | ||
993 | *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset + | ||
994 | (cr * afu->crs_len) + off); | ||
995 | return 0; | ||
996 | } | ||
997 | |||
998 | static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out) | ||
999 | { | ||
1000 | u64 aligned_off = off & ~0x3L; | ||
1001 | u32 val; | ||
1002 | int rc; | ||
1003 | |||
1004 | rc = native_afu_cr_read32(afu, cr, aligned_off, &val); | ||
1005 | if (!rc) | ||
1006 | *out = (val >> ((off & 0x3) * 8)) & 0xffff; | ||
1007 | return rc; | ||
1008 | } | ||
1009 | |||
1010 | static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out) | ||
1011 | { | ||
1012 | u64 aligned_off = off & ~0x3L; | ||
1013 | u32 val; | ||
1014 | int rc; | ||
1015 | |||
1016 | rc = native_afu_cr_read32(afu, cr, aligned_off, &val); | ||
1017 | if (!rc) | ||
1018 | *out = (val >> ((off & 0x3) * 8)) & 0xff; | ||
1019 | return rc; | ||
1020 | } | ||
1021 | |||
1022 | static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) | ||
1023 | { | ||
1024 | if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) | ||
1025 | return -EIO; | ||
1026 | if (unlikely(off >= afu->crs_len)) | ||
1027 | return -ERANGE; | ||
1028 | out_le32(afu->native->afu_desc_mmio + afu->crs_offset + | ||
1029 | (cr * afu->crs_len) + off, in); | ||
1030 | return 0; | ||
1031 | } | ||
1032 | |||
1033 | static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) | ||
1034 | { | ||
1035 | u64 aligned_off = off & ~0x3L; | ||
1036 | u32 val32, mask, shift; | ||
1037 | int rc; | ||
1038 | |||
1039 | rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); | ||
1040 | if (rc) | ||
1041 | return rc; | ||
1042 | shift = (off & 0x3) * 8; | ||
1043 | WARN_ON(shift == 24); | ||
1044 | mask = 0xffff << shift; | ||
1045 | val32 = (val32 & ~mask) | (in << shift); | ||
1046 | |||
1047 | rc = native_afu_cr_write32(afu, cr, aligned_off, val32); | ||
1048 | return rc; | ||
1049 | } | ||
1050 | |||
1051 | static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) | ||
1052 | { | ||
1053 | u64 aligned_off = off & ~0x3L; | ||
1054 | u32 val32, mask, shift; | ||
1055 | int rc; | ||
1056 | |||
1057 | rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); | ||
1058 | if (rc) | ||
1059 | return rc; | ||
1060 | shift = (off & 0x3) * 8; | ||
1061 | mask = 0xff << shift; | ||
1062 | val32 = (val32 & ~mask) | (in << shift); | ||
1063 | |||
1064 | rc = native_afu_cr_write32(afu, cr, aligned_off, val32); | ||
1065 | return rc; | ||
1066 | } | ||
1067 | |||
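The 8- and 16-bit config-record accessors never touch the MMIO space at sub-word granularity; they round down to a 32-bit boundary and shift, or read-modify-write, within the containing word. Worked example for a 16-bit write at offset 6 (the values follow directly from the code above):

/* Worked example for native_afu_cr_write16() at off = 6: */
u64 aligned_off = 6 & ~0x3L;		/* = 4, containing word */
u32 shift       = (6 & 0x3) * 8;	/* = 16                 */
u32 mask        = 0xffff << shift;	/* = 0xffff0000         */
/* val32 = (val32 & ~mask) | (in << shift) splices the halfword in,
 * and the surrounding read32/write32 pair does the actual MMIO.   */

The WARN_ON(shift == 24) in the 16-bit path catches the one layout a halfword cannot satisfy: an access straddling the 32-bit word boundary.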
1068 | const struct cxl_backend_ops cxl_native_ops = { | ||
1069 | .module = THIS_MODULE, | ||
1070 | .adapter_reset = cxl_pci_reset, | ||
1071 | .alloc_one_irq = cxl_pci_alloc_one_irq, | ||
1072 | .release_one_irq = cxl_pci_release_one_irq, | ||
1073 | .alloc_irq_ranges = cxl_pci_alloc_irq_ranges, | ||
1074 | .release_irq_ranges = cxl_pci_release_irq_ranges, | ||
1075 | .setup_irq = cxl_pci_setup_irq, | ||
1076 | .handle_psl_slice_error = native_handle_psl_slice_error, | ||
1077 | .psl_interrupt = NULL, | ||
1078 | .ack_irq = native_ack_irq, | ||
1079 | .attach_process = native_attach_process, | ||
1080 | .detach_process = native_detach_process, | ||
1081 | .support_attributes = native_support_attributes, | ||
1082 | .link_ok = cxl_adapter_link_ok, | ||
1083 | .release_afu = cxl_pci_release_afu, | ||
1084 | .afu_read_err_buffer = cxl_pci_afu_read_err_buffer, | ||
1085 | .afu_check_and_enable = native_afu_check_and_enable, | ||
1086 | .afu_activate_mode = native_afu_activate_mode, | ||
1087 | .afu_deactivate_mode = native_afu_deactivate_mode, | ||
1088 | .afu_reset = native_afu_reset, | ||
1089 | .afu_cr_read8 = native_afu_cr_read8, | ||
1090 | .afu_cr_read16 = native_afu_cr_read16, | ||
1091 | .afu_cr_read32 = native_afu_cr_read32, | ||
1092 | .afu_cr_read64 = native_afu_cr_read64, | ||
1093 | .afu_cr_write8 = native_afu_cr_write8, | ||
1094 | .afu_cr_write16 = native_afu_cr_write16, | ||
1095 | .afu_cr_write32 = native_afu_cr_write32, | ||
1096 | .read_adapter_vpd = cxl_pci_read_adapter_vpd, | ||
1097 | }; | ||
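cxl_native_ops stitches the PCI plumbing (cxl_pci_*) together with the native PSL handlers; the pSeries path fills the matching cxl_guest_ops table that init_cxl() binds instead. A hedged sketch of what a further backend would have to provide (all names below are illustrative):

/* Illustrative shape of an additional backend, not part of this diff. */
static const struct cxl_backend_ops example_backend_ops = {
	.module		= THIS_MODULE,
	.link_ok	= example_link_ok,	/* health check */
	.afu_reset	= example_afu_reset,	/* reset hook   */
	.attach_process	= example_attach,	/* ctx attach   */
	/* ...every hook reached through cxl_ops must be populated,
	 * except those explicitly allowed to be NULL, such as
	 * psl_interrupt in the native table above. */
};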
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
new file mode 100644
index 000000000000..edc458395f68
--- /dev/null
+++ b/drivers/misc/cxl/of.c
@@ -0,0 +1,513 @@ | |||
1 | /* | ||
2 | * Copyright 2015 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/platform_device.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/of_address.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | |||
17 | #include "cxl.h" | ||
18 | |||
19 | |||
20 | static const __be32 *read_prop_string(const struct device_node *np, | ||
21 | const char *prop_name) | ||
22 | { | ||
23 | const __be32 *prop; | ||
24 | |||
25 | prop = of_get_property(np, prop_name, NULL); | ||
26 | if (cxl_verbose && prop) | ||
27 | pr_info("%s: %s\n", prop_name, (char *) prop); | ||
28 | return prop; | ||
29 | } | ||
30 | |||
31 | static const __be32 *read_prop_dword(const struct device_node *np, | ||
32 | const char *prop_name, u32 *val) | ||
33 | { | ||
34 | const __be32 *prop; | ||
35 | |||
36 | prop = of_get_property(np, prop_name, NULL); | ||
37 | if (prop) | ||
38 | *val = be32_to_cpu(prop[0]); | ||
39 | if (cxl_verbose && prop) | ||
40 | pr_info("%s: %#x (%u)\n", prop_name, *val, *val); | ||
41 | return prop; | ||
42 | } | ||
43 | |||
44 | static const __be64 *read_prop64_dword(const struct device_node *np, | ||
45 | const char *prop_name, u64 *val) | ||
46 | { | ||
47 | const __be64 *prop; | ||
48 | |||
49 | prop = of_get_property(np, prop_name, NULL); | ||
50 | if (prop) | ||
51 | *val = be64_to_cpu(prop[0]); | ||
52 | if (cxl_verbose && prop) | ||
53 | pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val); | ||
54 | return prop; | ||
55 | } | ||
56 | |||
57 | |||
58 | static int read_handle(struct device_node *np, u64 *handle) | ||
59 | { | ||
60 | const __be32 *prop; | ||
61 | u64 size; | ||
62 | |||
63 | /* Get address and size of the node */ | ||
64 | prop = of_get_address(np, 0, &size, NULL); | ||
65 | if (size) | ||
66 | return -EINVAL; | ||
67 | |||
68 | /* Helper to read a big number; size is in cells (not bytes) */ | ||
69 | *handle = of_read_number(prop, of_n_addr_cells(np)); | ||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static int read_phys_addr(struct device_node *np, char *prop_name, | ||
74 | struct cxl_afu *afu) | ||
75 | { | ||
76 | int i, len, entry_size, naddr, nsize, type; | ||
77 | u64 addr, size; | ||
78 | const __be32 *prop; | ||
79 | |||
80 | naddr = of_n_addr_cells(np); | ||
81 | nsize = of_n_size_cells(np); | ||
82 | |||
83 | prop = of_get_property(np, prop_name, &len); | ||
84 | if (prop) { | ||
85 | entry_size = naddr + nsize; | ||
86 | for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) { | ||
87 | type = be32_to_cpu(prop[0]); | ||
88 | addr = of_read_number(prop, naddr); | ||
89 | size = of_read_number(&prop[naddr], nsize); | ||
90 | switch (type) { | ||
91 | case 0: /* unit address */ | ||
92 | afu->guest->handle = addr; | ||
93 | break; | ||
94 | case 1: /* p2 area */ | ||
95 | afu->guest->p2n_phys += addr; | ||
96 | afu->guest->p2n_size = size; | ||
97 | break; | ||
98 | case 2: /* problem state area */ | ||
99 | afu->psn_phys += addr; | ||
100 | afu->adapter->ps_size = size; | ||
101 | break; | ||
102 | default: | ||
103 | pr_err("Invalid address type %d found in %s property of AFU\n", | ||
104 | type, prop_name); | ||
105 | return -EINVAL; | ||
106 | } | ||
107 | if (cxl_verbose) | ||
108 | pr_info("%s: %#x %#llx (size %#llx)\n", | ||
109 | prop_name, type, addr, size); | ||
110 | } | ||
111 | } | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static int read_vpd(struct cxl *adapter, struct cxl_afu *afu) | ||
116 | { | ||
117 | char vpd[256]; | ||
118 | int rc; | ||
119 | size_t len = sizeof(vpd); | ||
120 | |||
121 | memset(vpd, 0, len); | ||
122 | |||
123 | if (adapter) | ||
124 | rc = cxl_guest_read_adapter_vpd(adapter, vpd, len); | ||
125 | else | ||
126 | rc = cxl_guest_read_afu_vpd(afu, vpd, len); | ||
127 | |||
128 | if (rc > 0) { | ||
129 | cxl_dump_debug_buffer(vpd, rc); | ||
130 | rc = 0; | ||
131 | } | ||
132 | return rc; | ||
133 | } | ||
134 | |||
135 | int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np) | ||
136 | { | ||
137 | if (read_handle(afu_np, &afu->guest->handle)) | ||
138 | return -EINVAL; | ||
139 | pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np) | ||
145 | { | ||
146 | int i, len, rc; | ||
147 | char *p; | ||
148 | const __be32 *prop; | ||
149 | u16 device_id, vendor_id; | ||
150 | u32 val = 0, class_code; | ||
151 | |||
152 | /* Properties are read in the same order as listed in PAPR */ | ||
153 | |||
154 | if (cxl_verbose) { | ||
155 | pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n"); | ||
156 | |||
157 | prop = of_get_property(np, "compatible", &len); | ||
158 | i = 0; | ||
159 | while (i < len) { | ||
160 | p = (char *) prop + i; | ||
161 | pr_info("compatible: %s\n", p); | ||
162 | i += strlen(p) + 1; | ||
163 | } | ||
164 | read_prop_string(np, "name"); | ||
165 | } | ||
166 | |||
167 | rc = read_phys_addr(np, "reg", afu); | ||
168 | if (rc) | ||
169 | return rc; | ||
170 | |||
171 | rc = read_phys_addr(np, "assigned-addresses", afu); | ||
172 | if (rc) | ||
173 | return rc; | ||
174 | |||
175 | if (afu->psn_phys == 0) | ||
176 | afu->psa = false; | ||
177 | else | ||
178 | afu->psa = true; | ||
179 | |||
180 | if (cxl_verbose) { | ||
181 | read_prop_string(np, "ibm,loc-code"); | ||
182 | read_prop_string(np, "device_type"); | ||
183 | } | ||
184 | |||
185 | read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised); | ||
186 | |||
187 | if (cxl_verbose) { | ||
188 | read_prop_dword(np, "ibm,scratchpad-size", &val); | ||
189 | read_prop_dword(np, "ibm,programmable", &val); | ||
190 | read_prop_string(np, "ibm,phandle"); | ||
191 | read_vpd(NULL, afu); | ||
192 | } | ||
193 | |||
194 | read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints); | ||
195 | afu->irqs_max = afu->guest->max_ints; | ||
196 | |||
197 | prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs); | ||
198 | if (prop) { | ||
199 | /* One extra interrupt for the PSL interrupt is already | ||
200 | * included. Remove it now to keep only AFU interrupts and | ||
201 | * match the native case. | ||
202 | */ | ||
203 | afu->pp_irqs--; | ||
204 | } | ||
205 | |||
206 | if (cxl_verbose) { | ||
207 | read_prop_dword(np, "ibm,max-ints", &val); | ||
208 | read_prop_dword(np, "ibm,vpd-size", &val); | ||
209 | } | ||
210 | |||
211 | read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len); | ||
212 | afu->eb_offset = 0; | ||
213 | |||
214 | if (cxl_verbose) | ||
215 | read_prop_dword(np, "ibm,config-record-type", &val); | ||
216 | |||
217 | read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len); | ||
218 | afu->crs_offset = 0; | ||
219 | |||
220 | read_prop_dword(np, "ibm,#config-records", &afu->crs_num); | ||
221 | |||
222 | if (cxl_verbose) { | ||
223 | for (i = 0; i < afu->crs_num; i++) { | ||
224 | rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID, | ||
225 | &device_id); | ||
226 | if (!rc) | ||
227 | pr_info("record %d - device-id: %#x\n", | ||
228 | i, device_id); | ||
229 | rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID, | ||
230 | &vendor_id); | ||
231 | if (!rc) | ||
232 | pr_info("record %d - vendor-id: %#x\n", | ||
233 | i, vendor_id); | ||
234 | rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION, | ||
235 | &class_code); | ||
236 | if (!rc) { | ||
237 | class_code >>= 8; | ||
238 | pr_info("record %d - class-code: %#x\n", | ||
239 | i, class_code); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | read_prop_dword(np, "ibm,function-number", &val); | ||
244 | read_prop_dword(np, "ibm,privileged-function", &val); | ||
245 | read_prop_dword(np, "vendor-id", &val); | ||
246 | read_prop_dword(np, "device-id", &val); | ||
247 | read_prop_dword(np, "revision-id", &val); | ||
248 | read_prop_dword(np, "class-code", &val); | ||
249 | read_prop_dword(np, "subsystem-vendor-id", &val); | ||
250 | read_prop_dword(np, "subsystem-id", &val); | ||
251 | } | ||
252 | /* | ||
253 | * if "ibm,process-mmio" doesn't exist then per-process mmio is | ||
254 | * not supported | ||
255 | */ | ||
256 | val = 0; | ||
257 | prop = read_prop_dword(np, "ibm,process-mmio", &val); | ||
258 | if (prop && val == 1) | ||
259 | afu->pp_psa = true; | ||
260 | else | ||
261 | afu->pp_psa = false; | ||
262 | |||
263 | if (cxl_verbose) { | ||
264 | read_prop_dword(np, "ibm,supports-aur", &val); | ||
265 | read_prop_dword(np, "ibm,supports-csrp", &val); | ||
266 | read_prop_dword(np, "ibm,supports-prr", &val); | ||
267 | } | ||
268 | |||
269 | prop = read_prop_dword(np, "ibm,function-error-interrupt", &val); | ||
270 | if (prop) | ||
271 | afu->serr_hwirq = val; | ||
272 | |||
273 | pr_devel("AFU handle: %#llx\n", afu->guest->handle); | ||
274 | pr_devel("p2n_phys: %#llx (size %#llx)\n", | ||
275 | afu->guest->p2n_phys, afu->guest->p2n_size); | ||
276 | pr_devel("psn_phys: %#llx (size %#llx)\n", | ||
277 | afu->psn_phys, afu->adapter->ps_size); | ||
278 | pr_devel("Max number of processes virtualised=%i\n", | ||
279 | afu->max_procs_virtualised); | ||
280 | pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs, | ||
281 | afu->irqs_max); | ||
282 | pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np) | ||
288 | { | ||
289 | const __be32 *ranges; | ||
290 | int len, nranges, i; | ||
291 | struct irq_avail *cur; | ||
292 | |||
293 | ranges = of_get_property(np, "interrupt-ranges", &len); | ||
294 | if (ranges == NULL || len < (2 * sizeof(int))) | ||
295 | return -EINVAL; | ||
296 | |||
297 | /* | ||
298 | * encoded array of two cells per entry, each cell encoded as | ||
299 | * with encode-int | ||
300 | */ | ||
301 | nranges = len / (2 * sizeof(int)); | ||
302 | if (nranges == 0 || (nranges * 2 * sizeof(int)) != len) | ||
303 | return -EINVAL; | ||
304 | |||
305 | adapter->guest->irq_avail = kzalloc(nranges * sizeof(struct irq_avail), | ||
306 | GFP_KERNEL); | ||
307 | if (adapter->guest->irq_avail == NULL) | ||
308 | return -ENOMEM; | ||
309 | |||
310 | adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]); | ||
311 | for (i = 0; i < nranges; i++) { | ||
312 | cur = &adapter->guest->irq_avail[i]; | ||
313 | cur->offset = be32_to_cpu(ranges[i * 2]); | ||
314 | cur->range = be32_to_cpu(ranges[i * 2 + 1]); | ||
315 | cur->bitmap = kcalloc(BITS_TO_LONGS(cur->range), | ||
316 | sizeof(*cur->bitmap), GFP_KERNEL); | ||
317 | if (cur->bitmap == NULL) | ||
318 | goto err; | ||
319 | if (cur->offset < adapter->guest->irq_base_offset) | ||
320 | adapter->guest->irq_base_offset = cur->offset; | ||
321 | if (cxl_verbose) | ||
322 | pr_info("available IRQ range: %#lx-%#lx (%lu)\n", | ||
323 | cur->offset, cur->offset + cur->range - 1, | ||
324 | cur->range); | ||
325 | } | ||
326 | adapter->guest->irq_nranges = nranges; | ||
327 | spin_lock_init(&adapter->guest->irq_alloc_lock); | ||
328 | |||
329 | return 0; | ||
330 | err: | ||
331 | for (i--; i >= 0; i--) { | ||
332 | cur = &adapter->guest->irq_avail[i]; | ||
333 | kfree(cur->bitmap); | ||
334 | } | ||
335 | kfree(adapter->guest->irq_avail); | ||
336 | adapter->guest->irq_avail = NULL; | ||
337 | return -ENOMEM; | ||
338 | } | ||
339 | |||
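read_adapter_irq_config() turns the PAPR "interrupt-ranges" property, a flat list of (offset, count) cell pairs, into per-range allocation bitmaps, remembering the lowest offset as irq_base_offset and unwinding any half-built bitmaps on allocation failure. A worked example with made-up values:

/* Illustrative property: two ranges of hardware IRQs. */
static const __be32 example_ranges[] = {
	cpu_to_be32(0x100), cpu_to_be32(0x20),	/* hwirqs 0x100..0x11f */
	cpu_to_be32(0x200), cpu_to_be32(0x10),	/* hwirqs 0x200..0x20f */
};
/* len = 16 bytes -> nranges = 2; irq_base_offset = 0x100; each
 * range gets a zeroed bitmap of BITS_TO_LONGS(range) longs. */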
340 | int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np) | ||
341 | { | ||
342 | if (read_handle(np, &adapter->guest->handle)) | ||
343 | return -EINVAL; | ||
344 | pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle); | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np) | ||
350 | { | ||
351 | int rc, len, naddr, i; | ||
352 | char *p; | ||
353 | const __be32 *prop; | ||
354 | u32 val = 0; | ||
355 | |||
356 | /* Properties are read in the same order as listed in PAPR */ | ||
357 | |||
358 | naddr = of_n_addr_cells(np); | ||
359 | |||
360 | if (cxl_verbose) { | ||
361 | pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n"); | ||
362 | |||
363 | read_prop_dword(np, "#address-cells", &val); | ||
364 | read_prop_dword(np, "#size-cells", &val); | ||
365 | |||
366 | prop = of_get_property(np, "compatible", &len); | ||
367 | i = 0; | ||
368 | while (i < len) { | ||
369 | p = (char *) prop + i; | ||
370 | pr_info("compatible: %s\n", p); | ||
371 | i += strlen(p) + 1; | ||
372 | } | ||
373 | read_prop_string(np, "name"); | ||
374 | read_prop_string(np, "model"); | ||
375 | |||
376 | prop = of_get_property(np, "reg", NULL); | ||
377 | if (prop) { | ||
378 | pr_info("reg: addr:%#llx size:%#x\n", | ||
379 | of_read_number(prop, naddr), | ||
380 | be32_to_cpu(prop[naddr])); | ||
381 | } | ||
382 | |||
383 | read_prop_string(np, "ibm,loc-code"); | ||
384 | } | ||
385 | |||
386 | if ((rc = read_adapter_irq_config(adapter, np))) | ||
387 | return rc; | ||
388 | |||
389 | if (cxl_verbose) { | ||
390 | read_prop_string(np, "device_type"); | ||
391 | read_prop_string(np, "ibm,phandle"); | ||
392 | } | ||
393 | |||
394 | prop = read_prop_dword(np, "ibm,caia-version", &val); | ||
395 | if (prop) { | ||
396 | adapter->caia_major = (val & 0xFF00) >> 8; | ||
397 | adapter->caia_minor = val & 0xFF; | ||
398 | } | ||
399 | |||
400 | prop = read_prop_dword(np, "ibm,psl-revision", &val); | ||
401 | if (prop) | ||
402 | adapter->psl_rev = val; | ||
403 | |||
404 | prop = read_prop_string(np, "status"); | ||
405 | if (prop) { | ||
406 | adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop); | ||
407 | if (adapter->guest->status == NULL) | ||
408 | return -ENOMEM; | ||
409 | } | ||
410 | |||
411 | prop = read_prop_dword(np, "vendor-id", &val); | ||
412 | if (prop) | ||
413 | adapter->guest->vendor = val; | ||
414 | |||
415 | prop = read_prop_dword(np, "device-id", &val); | ||
416 | if (prop) | ||
417 | adapter->guest->device = val; | ||
418 | |||
419 | if (cxl_verbose) { | ||
420 | read_prop_dword(np, "ibm,privileged-facility", &val); | ||
421 | read_prop_dword(np, "revision-id", &val); | ||
422 | read_prop_dword(np, "class-code", &val); | ||
423 | } | ||
424 | |||
425 | prop = read_prop_dword(np, "subsystem-vendor-id", &val); | ||
426 | if (prop) | ||
427 | adapter->guest->subsystem_vendor = val; | ||
428 | |||
429 | prop = read_prop_dword(np, "subsystem-id", &val); | ||
430 | if (prop) | ||
431 | adapter->guest->subsystem = val; | ||
432 | |||
433 | if (cxl_verbose) | ||
434 | read_vpd(adapter, NULL); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | static int cxl_of_remove(struct platform_device *pdev) | ||
440 | { | ||
441 | struct cxl *adapter; | ||
442 | int afu; | ||
443 | |||
444 | adapter = dev_get_drvdata(&pdev->dev); | ||
445 | for (afu = 0; afu < adapter->slices; afu++) | ||
446 | cxl_guest_remove_afu(adapter->afu[afu]); | ||
447 | |||
448 | cxl_guest_remove_adapter(adapter); | ||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static void cxl_of_shutdown(struct platform_device *pdev) | ||
453 | { | ||
454 | cxl_of_remove(pdev); | ||
455 | } | ||
456 | |||
457 | int cxl_of_probe(struct platform_device *pdev) | ||
458 | { | ||
459 | struct device_node *np = NULL; | ||
460 | struct device_node *afu_np = NULL; | ||
461 | struct cxl *adapter = NULL; | ||
462 | int ret; | ||
463 | int slice, slice_ok; | ||
464 | |||
465 | pr_devel("in %s\n", __func__); | ||
466 | |||
467 | np = pdev->dev.of_node; | ||
468 | if (np == NULL) | ||
469 | return -ENODEV; | ||
470 | |||
471 | /* init adapter */ | ||
472 | adapter = cxl_guest_init_adapter(np, pdev); | ||
473 | if (IS_ERR(adapter)) { | ||
474 | dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter)); | ||
475 | return PTR_ERR(adapter); | ||
476 | } | ||
477 | |||
478 | /* init afu */ | ||
479 | slice_ok = 0; | ||
480 | for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) { | ||
481 | if ((ret = cxl_guest_init_afu(adapter, slice, afu_np))) | ||
482 | dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n", | ||
483 | slice, ret); | ||
484 | else | ||
485 | slice_ok++; | ||
486 | } | ||
487 | |||
488 | if (slice_ok == 0) { | ||
489 | dev_info(&pdev->dev, "No active AFU"); | ||
490 | adapter->slices = 0; | ||
491 | } | ||
492 | |||
493 | if (afu_np) | ||
494 | of_node_put(afu_np); | ||
495 | return 0; | ||
496 | } | ||
497 | |||
498 | static const struct of_device_id cxl_of_match[] = { | ||
499 | { .compatible = "ibm,coherent-platform-facility",}, | ||
500 | {}, | ||
501 | }; | ||
502 | MODULE_DEVICE_TABLE(of, cxl_of_match); | ||
503 | |||
504 | struct platform_driver cxl_of_driver = { | ||
505 | .driver = { | ||
506 | .name = "cxl_of", | ||
507 | .of_match_table = cxl_of_match, | ||
508 | .owner = THIS_MODULE | ||
509 | }, | ||
510 | .probe = cxl_of_probe, | ||
511 | .remove = cxl_of_remove, | ||
512 | .shutdown = cxl_of_shutdown, | ||
513 | }; | ||
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index a89608334ed5..2844e975bf79 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -89,8 +89,8 @@ | |||
89 | 89 | ||
90 | /* This works a little different than the p1/p2 register accesses to make it | 90 | /* This works a little different than the p1/p2 register accesses to make it |
91 | * easier to pull out individual fields */ | 91 | * easier to pull out individual fields */ |
92 | #define AFUD_READ(afu, off) in_be64(afu->afu_desc_mmio + off) | 92 | #define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off) |
93 | #define AFUD_READ_LE(afu, off) in_le64(afu->afu_desc_mmio + off) | 93 | #define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off) |
94 | #define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit))) | 94 | #define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit))) |
95 | #define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be)) | 95 | #define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be)) |
96 | 96 | ||
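The afu->native indirection introduced by these macros is the core of this series: state that only exists on bare metal moves out of the shared structures into separately allocated native-only structs, so that struct cxl and struct cxl_afu can be reused by the new guest back-end. A rough sketch of the split, using only fields visible in this patch (ordering and completeness are assumptions):

	struct cxl_afu_native {
		void __iomem *p1n_mmio;		/* privileged per-slice regs */
		void __iomem *afu_desc_mmio;	/* AFU descriptor space */
		u64 pp_offset;			/* per-process PSA offset */
		struct mutex spa_mutex;
		/* ... */
	};

	struct cxl_native {
		void __iomem *p1_mmio;
		void __iomem *p2_mmio;
		u64 ps_off;			/* problem-state area offset */
		u64 afu_desc_off;
		u64 afu_desc_size;
		/* ... */
	};

	struct cxl_afu {
		struct cxl_afu_native *native;	/* NULL when running as a guest */
		struct cxl_afu_guest *guest;	/* NULL on bare metal */
		void __iomem *p2n_mmio;		/* unprivileged regs, both modes */
		/* ... common state ... */
	};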
@@ -115,24 +115,6 @@ | |||
115 | #define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) | 115 | #define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) |
116 | #define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48) | 116 | #define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48) |
117 | 117 | ||
118 | u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off) | ||
119 | { | ||
120 | u64 aligned_off = off & ~0x3L; | ||
121 | u32 val; | ||
122 | |||
123 | val = cxl_afu_cr_read32(afu, cr, aligned_off); | ||
124 | return (val >> ((off & 0x2) * 8)) & 0xffff; | ||
125 | } | ||
126 | |||
127 | u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off) | ||
128 | { | ||
129 | u64 aligned_off = off & ~0x3L; | ||
130 | u32 val; | ||
131 | |||
132 | val = cxl_afu_cr_read32(afu, cr, aligned_off); | ||
133 | return (val >> ((off & 0x3) * 8)) & 0xff; | ||
134 | } | ||
135 | |||
136 | static const struct pci_device_id cxl_pci_tbl[] = { | 118 | static const struct pci_device_id cxl_pci_tbl[] = { |
137 | { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), }, | 119 | { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), }, |
138 | { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), }, | 120 | { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), }, |
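The two helpers deleted above synthesised 16- and 8-bit configuration-record reads from an aligned 32-bit read; they go away because config records may now sit behind a hypervisor call rather than MMIO, so the common code calls size-specific backend hooks instead. For reference, the sub-word extraction they performed (standalone sketch, not driver code):

	static u16 cr_extract16(u32 word, u64 off)
	{
		/* off & 0x2 selects the low or high half-word */
		return (word >> ((off & 0x2) * 8)) & 0xffff;
	}

	static u8 cr_extract8(u32 word, u64 off)
	{
		/* off & 0x3 selects one of the four bytes */
		return (word >> ((off & 0x3) * 8)) & 0xff;
	}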
@@ -432,8 +414,8 @@ static int init_implementation_afu_regs(struct cxl_afu *afu) | |||
432 | return 0; | 414 | return 0; |
433 | } | 415 | } |
434 | 416 | ||
435 | int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, | 417 | int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, |
436 | unsigned int virq) | 418 | unsigned int virq) |
437 | { | 419 | { |
438 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | 420 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); |
439 | 421 | ||
@@ -475,28 +457,30 @@ int cxl_update_image_control(struct cxl *adapter) | |||
475 | return 0; | 457 | return 0; |
476 | } | 458 | } |
477 | 459 | ||
478 | int cxl_alloc_one_irq(struct cxl *adapter) | 460 | int cxl_pci_alloc_one_irq(struct cxl *adapter) |
479 | { | 461 | { |
480 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | 462 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); |
481 | 463 | ||
482 | return pnv_cxl_alloc_hwirqs(dev, 1); | 464 | return pnv_cxl_alloc_hwirqs(dev, 1); |
483 | } | 465 | } |
484 | 466 | ||
485 | void cxl_release_one_irq(struct cxl *adapter, int hwirq) | 467 | void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq) |
486 | { | 468 | { |
487 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | 469 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); |
488 | 470 | ||
489 | return pnv_cxl_release_hwirqs(dev, hwirq, 1); | 471 | return pnv_cxl_release_hwirqs(dev, hwirq, 1); |
490 | } | 472 | } |
491 | 473 | ||
492 | int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num) | 474 | int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, |
475 | struct cxl *adapter, unsigned int num) | ||
493 | { | 476 | { |
494 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | 477 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); |
495 | 478 | ||
496 | return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num); | 479 | return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num); |
497 | } | 480 | } |
498 | 481 | ||
499 | void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter) | 482 | void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, |
483 | struct cxl *adapter) | ||
500 | { | 484 | { |
501 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | 485 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); |
502 | 486 | ||
@@ -557,7 +541,7 @@ static int switch_card_to_cxl(struct pci_dev *dev) | |||
557 | return 0; | 541 | return 0; |
558 | } | 542 | } |
559 | 543 | ||
560 | static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) | 544 | static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) |
561 | { | 545 | { |
562 | u64 p1n_base, p2n_base, afu_desc; | 546 | u64 p1n_base, p2n_base, afu_desc; |
563 | const u64 p1n_size = 0x100; | 547 | const u64 p1n_size = 0x100; |
@@ -565,15 +549,15 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p | |||
565 | 549 | ||
566 | p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); | 550 | p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); |
567 | p2n_base = p2_base(dev) + (afu->slice * p2n_size); | 551 | p2n_base = p2_base(dev) + (afu->slice * p2n_size); |
568 | afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size)); | 552 | afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size)); |
569 | afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size); | 553 | afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size); |
570 | 554 | ||
571 | if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size))) | 555 | if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size))) |
572 | goto err; | 556 | goto err; |
573 | if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) | 557 | if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) |
574 | goto err1; | 558 | goto err1; |
575 | if (afu_desc) { | 559 | if (afu_desc) { |
576 | if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size))) | 560 | if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size))) |
577 | goto err2; | 561 | goto err2; |
578 | } | 562 | } |
579 | 563 | ||
@@ -581,62 +565,41 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p | |||
581 | err2: | 565 | err2: |
582 | iounmap(afu->p2n_mmio); | 566 | iounmap(afu->p2n_mmio); |
583 | err1: | 567 | err1: |
584 | iounmap(afu->p1n_mmio); | 568 | iounmap(afu->native->p1n_mmio); |
585 | err: | 569 | err: |
586 | dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); | 570 | dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); |
587 | return -ENOMEM; | 571 | return -ENOMEM; |
588 | } | 572 | } |
589 | 573 | ||
590 | static void cxl_unmap_slice_regs(struct cxl_afu *afu) | 574 | static void pci_unmap_slice_regs(struct cxl_afu *afu) |
591 | { | 575 | { |
592 | if (afu->p2n_mmio) { | 576 | if (afu->p2n_mmio) { |
593 | iounmap(afu->p2n_mmio); | 577 | iounmap(afu->p2n_mmio); |
594 | afu->p2n_mmio = NULL; | 578 | afu->p2n_mmio = NULL; |
595 | } | 579 | } |
596 | if (afu->p1n_mmio) { | 580 | if (afu->native->p1n_mmio) { |
597 | iounmap(afu->p1n_mmio); | 581 | iounmap(afu->native->p1n_mmio); |
598 | afu->p1n_mmio = NULL; | 582 | afu->native->p1n_mmio = NULL; |
599 | } | 583 | } |
600 | if (afu->afu_desc_mmio) { | 584 | if (afu->native->afu_desc_mmio) { |
601 | iounmap(afu->afu_desc_mmio); | 585 | iounmap(afu->native->afu_desc_mmio); |
602 | afu->afu_desc_mmio = NULL; | 586 | afu->native->afu_desc_mmio = NULL; |
603 | } | 587 | } |
604 | } | 588 | } |
605 | 589 | ||
606 | static void cxl_release_afu(struct device *dev) | 590 | void cxl_pci_release_afu(struct device *dev) |
607 | { | 591 | { |
608 | struct cxl_afu *afu = to_cxl_afu(dev); | 592 | struct cxl_afu *afu = to_cxl_afu(dev); |
609 | 593 | ||
610 | pr_devel("cxl_release_afu\n"); | 594 | pr_devel("%s\n", __func__); |
611 | 595 | ||
612 | idr_destroy(&afu->contexts_idr); | 596 | idr_destroy(&afu->contexts_idr); |
613 | cxl_release_spa(afu); | 597 | cxl_release_spa(afu); |
614 | 598 | ||
599 | kfree(afu->native); | ||
615 | kfree(afu); | 600 | kfree(afu); |
616 | } | 601 | } |
617 | 602 | ||
618 | static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) | ||
619 | { | ||
620 | struct cxl_afu *afu; | ||
621 | |||
622 | if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL))) | ||
623 | return NULL; | ||
624 | |||
625 | afu->adapter = adapter; | ||
626 | afu->dev.parent = &adapter->dev; | ||
627 | afu->dev.release = cxl_release_afu; | ||
628 | afu->slice = slice; | ||
629 | idr_init(&afu->contexts_idr); | ||
630 | mutex_init(&afu->contexts_lock); | ||
631 | spin_lock_init(&afu->afu_cntl_lock); | ||
632 | mutex_init(&afu->spa_mutex); | ||
633 | |||
634 | afu->prefault_mode = CXL_PREFAULT_NONE; | ||
635 | afu->irqs_max = afu->adapter->user_irqs; | ||
636 | |||
637 | return afu; | ||
638 | } | ||
639 | |||
640 | /* Expects AFU struct to have recently been zeroed out */ | 603 | /* Expects AFU struct to have recently been zeroed out */ |
641 | static int cxl_read_afu_descriptor(struct cxl_afu *afu) | 604 | static int cxl_read_afu_descriptor(struct cxl_afu *afu) |
642 | { | 605 | { |
@@ -658,7 +621,7 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu) | |||
658 | afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; | 621 | afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; |
659 | afu->psa = AFUD_PPPSA_PSA(val); | 622 | afu->psa = AFUD_PPPSA_PSA(val); |
660 | if ((afu->pp_psa = AFUD_PPPSA_PP(val))) | 623 | if ((afu->pp_psa = AFUD_PPPSA_PP(val))) |
661 | afu->pp_offset = AFUD_READ_PPPSA_OFF(afu); | 624 | afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu); |
662 | 625 | ||
663 | val = AFUD_READ_CR(afu); | 626 | val = AFUD_READ_CR(afu); |
664 | afu->crs_len = AFUD_CR_LEN(val) * 256; | 627 | afu->crs_len = AFUD_CR_LEN(val) * 256; |
@@ -685,10 +648,11 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu) | |||
685 | 648 | ||
686 | static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) | 649 | static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) |
687 | { | 650 | { |
688 | int i; | 651 | int i, rc; |
652 | u32 val; | ||
689 | 653 | ||
690 | if (afu->psa && afu->adapter->ps_size < | 654 | if (afu->psa && afu->adapter->ps_size < |
691 | (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { | 655 | (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { |
692 | dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); | 656 | dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); |
693 | return -ENODEV; | 657 | return -ENODEV; |
694 | } | 658 | } |
@@ -697,7 +661,8 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) | |||
697 | dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!"); | 661 | dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!"); |
698 | 662 | ||
699 | for (i = 0; i < afu->crs_num; i++) { | 663 | for (i = 0; i < afu->crs_num; i++) { |
700 | if ((cxl_afu_cr_read32(afu, i, 0) == 0)) { | 664 | rc = cxl_ops->afu_cr_read32(afu, i, 0, &val); |
665 | if (rc || val == 0) { | ||
701 | dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i); | 666 | dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i); |
702 | return -EINVAL; | 667 | return -EINVAL; |
703 | } | 668 | } |
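cxl_ops, first used in this file just above, is the backend-operations table at the heart of the bare-metal/guest split: both implementations provide the same hooks, and the configuration-record readers now return an error code with the value handed back through a pointer, because a guest-side read can fail. A condensed sketch of the table, with the member list abridged to calls that appear in this patch (exact declaration order is an assumption):

	struct cxl_backend_ops {
		struct module *module;
		int (*adapter_reset)(struct cxl *adapter);
		int (*afu_reset)(struct cxl_afu *afu);
		int (*afu_check_and_enable)(struct cxl_afu *afu);
		int (*afu_activate_mode)(struct cxl_afu *afu, int mode);
		int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode);
		ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf,
					       loff_t off, size_t count);
		int (*afu_cr_read8)(struct cxl_afu *afu, int cr, u64 off, u8 *out);
		int (*afu_cr_read16)(struct cxl_afu *afu, int cr, u64 off, u16 *out);
		int (*afu_cr_read32)(struct cxl_afu *afu, int cr, u64 off, u32 *out);
		int (*afu_cr_write8)(struct cxl_afu *afu, int cr, u64 off, u8 in);
		bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
		bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
		/* ... */
	};

	extern const struct cxl_backend_ops *cxl_ops;	/* native or guest */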
@@ -718,7 +683,7 @@ static int sanitise_afu_regs(struct cxl_afu *afu) | |||
718 | reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | 683 | reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); |
719 | if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { | 684 | if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { |
720 | dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); | 685 | dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); |
721 | if (__cxl_afu_reset(afu)) | 686 | if (cxl_ops->afu_reset(afu)) |
722 | return -EIO; | 687 | return -EIO; |
723 | if (cxl_afu_disable(afu)) | 688 | if (cxl_afu_disable(afu)) |
724 | return -EIO; | 689 | return -EIO; |
@@ -766,13 +731,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu) | |||
766 | * 4/8 bytes aligned access. So in case the requested offset/count aren't 8 byte | 731 | * 4/8 bytes aligned access. So in case the requested offset/count aren't 8 byte |
767 | * aligned, the function uses a bounce buffer which can be max PAGE_SIZE. | 732 | * aligned, the function uses a bounce buffer which can be max PAGE_SIZE. |
768 | */ | 733 | */ |
769 | ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf, | 734 | ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, |
770 | loff_t off, size_t count) | 735 | loff_t off, size_t count) |
771 | { | 736 | { |
772 | loff_t aligned_start, aligned_end; | 737 | loff_t aligned_start, aligned_end; |
773 | size_t aligned_length; | 738 | size_t aligned_length; |
774 | void *tbuf; | 739 | void *tbuf; |
775 | const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset; | 740 | const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; |
776 | 741 | ||
777 | if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) | 742 | if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) |
778 | return 0; | 743 | return 0; |
@@ -803,18 +768,18 @@ ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf, | |||
803 | return count; | 768 | return count; |
804 | } | 769 | } |
805 | 770 | ||
806 | static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) | 771 | static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) |
807 | { | 772 | { |
808 | int rc; | 773 | int rc; |
809 | 774 | ||
810 | if ((rc = cxl_map_slice_regs(afu, adapter, dev))) | 775 | if ((rc = pci_map_slice_regs(afu, adapter, dev))) |
811 | return rc; | 776 | return rc; |
812 | 777 | ||
813 | if ((rc = sanitise_afu_regs(afu))) | 778 | if ((rc = sanitise_afu_regs(afu))) |
814 | goto err1; | 779 | goto err1; |
815 | 780 | ||
816 | /* We need to reset the AFU before we can read the AFU descriptor */ | 781 | /* We need to reset the AFU before we can read the AFU descriptor */ |
817 | if ((rc = __cxl_afu_reset(afu))) | 782 | if ((rc = cxl_ops->afu_reset(afu))) |
818 | goto err1; | 783 | goto err1; |
819 | 784 | ||
820 | if (cxl_verbose) | 785 | if (cxl_verbose) |
@@ -829,44 +794,50 @@ static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc | |||
829 | if ((rc = init_implementation_afu_regs(afu))) | 794 | if ((rc = init_implementation_afu_regs(afu))) |
830 | goto err1; | 795 | goto err1; |
831 | 796 | ||
832 | if ((rc = cxl_register_serr_irq(afu))) | 797 | if ((rc = cxl_native_register_serr_irq(afu))) |
833 | goto err1; | 798 | goto err1; |
834 | 799 | ||
835 | if ((rc = cxl_register_psl_irq(afu))) | 800 | if ((rc = cxl_native_register_psl_irq(afu))) |
836 | goto err2; | 801 | goto err2; |
837 | 802 | ||
838 | return 0; | 803 | return 0; |
839 | 804 | ||
840 | err2: | 805 | err2: |
841 | cxl_release_serr_irq(afu); | 806 | cxl_native_release_serr_irq(afu); |
842 | err1: | 807 | err1: |
843 | cxl_unmap_slice_regs(afu); | 808 | pci_unmap_slice_regs(afu); |
844 | return rc; | 809 | return rc; |
845 | } | 810 | } |
846 | 811 | ||
847 | static void cxl_deconfigure_afu(struct cxl_afu *afu) | 812 | static void pci_deconfigure_afu(struct cxl_afu *afu) |
848 | { | 813 | { |
849 | cxl_release_psl_irq(afu); | 814 | cxl_native_release_psl_irq(afu); |
850 | cxl_release_serr_irq(afu); | 815 | cxl_native_release_serr_irq(afu); |
851 | cxl_unmap_slice_regs(afu); | 816 | pci_unmap_slice_regs(afu); |
852 | } | 817 | } |
853 | 818 | ||
854 | static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) | 819 | static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) |
855 | { | 820 | { |
856 | struct cxl_afu *afu; | 821 | struct cxl_afu *afu; |
857 | int rc; | 822 | int rc = -ENOMEM; |
858 | 823 | ||
859 | afu = cxl_alloc_afu(adapter, slice); | 824 | afu = cxl_alloc_afu(adapter, slice); |
860 | if (!afu) | 825 | if (!afu) |
861 | return -ENOMEM; | 826 | return -ENOMEM; |
862 | 827 | ||
828 | afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL); | ||
829 | if (!afu->native) | ||
830 | goto err_free_afu; | ||
831 | |||
832 | mutex_init(&afu->native->spa_mutex); | ||
833 | |||
863 | rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice); | 834 | rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice); |
864 | if (rc) | 835 | if (rc) |
865 | goto err_free; | 836 | goto err_free_native; |
866 | 837 | ||
867 | rc = cxl_configure_afu(afu, adapter, dev); | 838 | rc = pci_configure_afu(afu, adapter, dev); |
868 | if (rc) | 839 | if (rc) |
869 | goto err_free; | 840 | goto err_free_native; |
870 | 841 | ||
871 | /* Don't care if this fails */ | 842 | /* Don't care if this fails */ |
872 | cxl_debugfs_afu_add(afu); | 843 | cxl_debugfs_afu_add(afu); |
@@ -889,24 +860,27 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) | |||
889 | return 0; | 860 | return 0; |
890 | 861 | ||
891 | err_put1: | 862 | err_put1: |
892 | cxl_deconfigure_afu(afu); | 863 | pci_deconfigure_afu(afu); |
893 | cxl_debugfs_afu_remove(afu); | 864 | cxl_debugfs_afu_remove(afu); |
894 | device_unregister(&afu->dev); | 865 | device_unregister(&afu->dev); |
895 | return rc; | 866 | return rc; |
896 | 867 | ||
897 | err_free: | 868 | err_free_native: |
869 | kfree(afu->native); | ||
870 | err_free_afu: | ||
898 | kfree(afu); | 871 | kfree(afu); |
899 | return rc; | 872 | return rc; |
900 | 873 | ||
901 | } | 874 | } |
902 | 875 | ||
903 | static void cxl_remove_afu(struct cxl_afu *afu) | 876 | static void cxl_pci_remove_afu(struct cxl_afu *afu) |
904 | { | 877 | { |
905 | pr_devel("cxl_remove_afu\n"); | 878 | pr_devel("%s\n", __func__); |
906 | 879 | ||
907 | if (!afu) | 880 | if (!afu) |
908 | return; | 881 | return; |
909 | 882 | ||
883 | cxl_pci_vphb_remove(afu); | ||
910 | cxl_sysfs_afu_remove(afu); | 884 | cxl_sysfs_afu_remove(afu); |
911 | cxl_debugfs_afu_remove(afu); | 885 | cxl_debugfs_afu_remove(afu); |
912 | 886 | ||
@@ -915,13 +889,13 @@ static void cxl_remove_afu(struct cxl_afu *afu) | |||
915 | spin_unlock(&afu->adapter->afu_list_lock); | 889 | spin_unlock(&afu->adapter->afu_list_lock); |
916 | 890 | ||
917 | cxl_context_detach_all(afu); | 891 | cxl_context_detach_all(afu); |
918 | cxl_afu_deactivate_mode(afu); | 892 | cxl_ops->afu_deactivate_mode(afu, afu->current_mode); |
919 | 893 | ||
920 | cxl_deconfigure_afu(afu); | 894 | pci_deconfigure_afu(afu); |
921 | device_unregister(&afu->dev); | 895 | device_unregister(&afu->dev); |
922 | } | 896 | } |
923 | 897 | ||
924 | int cxl_reset(struct cxl *adapter) | 898 | int cxl_pci_reset(struct cxl *adapter) |
925 | { | 899 | { |
926 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | 900 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); |
927 | int rc; | 901 | int rc; |
@@ -955,17 +929,17 @@ static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev) | |||
955 | pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx", | 929 | pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx", |
956 | p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev)); | 930 | p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev)); |
957 | 931 | ||
958 | if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) | 932 | if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) |
959 | goto err3; | 933 | goto err3; |
960 | 934 | ||
961 | if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) | 935 | if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) |
962 | goto err4; | 936 | goto err4; |
963 | 937 | ||
964 | return 0; | 938 | return 0; |
965 | 939 | ||
966 | err4: | 940 | err4: |
967 | iounmap(adapter->p1_mmio); | 941 | iounmap(adapter->native->p1_mmio); |
968 | adapter->p1_mmio = NULL; | 942 | adapter->native->p1_mmio = NULL; |
969 | err3: | 943 | err3: |
970 | pci_release_region(dev, 0); | 944 | pci_release_region(dev, 0); |
971 | err2: | 945 | err2: |
@@ -976,14 +950,14 @@ err1: | |||
976 | 950 | ||
977 | static void cxl_unmap_adapter_regs(struct cxl *adapter) | 951 | static void cxl_unmap_adapter_regs(struct cxl *adapter) |
978 | { | 952 | { |
979 | if (adapter->p1_mmio) { | 953 | if (adapter->native->p1_mmio) { |
980 | iounmap(adapter->p1_mmio); | 954 | iounmap(adapter->native->p1_mmio); |
981 | adapter->p1_mmio = NULL; | 955 | adapter->native->p1_mmio = NULL; |
982 | pci_release_region(to_pci_dev(adapter->dev.parent), 2); | 956 | pci_release_region(to_pci_dev(adapter->dev.parent), 2); |
983 | } | 957 | } |
984 | if (adapter->p2_mmio) { | 958 | if (adapter->native->p2_mmio) { |
985 | iounmap(adapter->p2_mmio); | 959 | iounmap(adapter->native->p2_mmio); |
986 | adapter->p2_mmio = NULL; | 960 | adapter->native->p2_mmio = NULL; |
987 | pci_release_region(to_pci_dev(adapter->dev.parent), 0); | 961 | pci_release_region(to_pci_dev(adapter->dev.parent), 0); |
988 | } | 962 | } |
989 | } | 963 | } |
@@ -1024,10 +998,10 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev) | |||
1024 | 998 | ||
1025 | /* Convert everything to bytes, because there is NO WAY I'd look at the | 999 | /* Convert everything to bytes, because there is NO WAY I'd look at the |
1026 | * code a month later and forget what units these are in ;-) */ | 1000 | * code a month later and forget what units these are in ;-) */ |
1027 | adapter->ps_off = ps_off * 64 * 1024; | 1001 | adapter->native->ps_off = ps_off * 64 * 1024; |
1028 | adapter->ps_size = ps_size * 64 * 1024; | 1002 | adapter->ps_size = ps_size * 64 * 1024; |
1029 | adapter->afu_desc_off = afu_desc_off * 64 * 1024; | 1003 | adapter->native->afu_desc_off = afu_desc_off * 64 * 1024; |
1030 | adapter->afu_desc_size = afu_desc_size *64 * 1024; | 1004 | adapter->native->afu_desc_size = afu_desc_size * 64 * 1024; |
1031 | 1005 | ||
1032 | /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */ | 1006 | /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */ |
1033 | adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; | 1007 | adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; |
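The unit conversion above is worth spelling out: the VSEC reports the problem-state and AFU-descriptor offsets/sizes in 64 KiB units, which are converted to bytes exactly once, and the user-visible interrupt pool is the hardware total minus one PSL error interrupt and minus one slice-error and one DSI interrupt per AFU. A worked example with made-up numbers:

	/* VSEC says ps_off = 4 (64 KiB units): */
	ps_off_bytes = 4 * 64 * 1024;		/* = 0x40000 (256 KiB) */

	/* 2037 hwirqs and 4 slices (illustrative values): */
	user_irqs = 2037 - 1 - 2 * 4;		/* = 2028 */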
@@ -1078,21 +1052,26 @@ static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) | |||
1078 | return -EINVAL; | 1052 | return -EINVAL; |
1079 | } | 1053 | } |
1080 | 1054 | ||
1081 | if (!adapter->afu_desc_off || !adapter->afu_desc_size) { | 1055 | if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) { |
1082 | dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n"); | 1056 | dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n"); |
1083 | return -EINVAL; | 1057 | return -EINVAL; |
1084 | } | 1058 | } |
1085 | 1059 | ||
1086 | if (adapter->ps_size > p2_size(dev) - adapter->ps_off) { | 1060 | if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) { |
1087 | dev_err(&dev->dev, "ABORTING: Problem state size larger than " | 1061 | dev_err(&dev->dev, "ABORTING: Problem state size larger than " |
1088 | "available in BAR2: 0x%llx > 0x%llx\n", | 1062 | "available in BAR2: 0x%llx > 0x%llx\n", |
1089 | adapter->ps_size, p2_size(dev) - adapter->ps_off); | 1063 | adapter->ps_size, p2_size(dev) - adapter->native->ps_off); |
1090 | return -EINVAL; | 1064 | return -EINVAL; |
1091 | } | 1065 | } |
1092 | 1066 | ||
1093 | return 0; | 1067 | return 0; |
1094 | } | 1068 | } |
1095 | 1069 | ||
1070 | ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) | ||
1071 | { | ||
1072 | return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf); | ||
1073 | } | ||
1074 | |||
1096 | static void cxl_release_adapter(struct device *dev) | 1075 | static void cxl_release_adapter(struct device *dev) |
1097 | { | 1076 | { |
1098 | struct cxl *adapter = to_cxl_adapter(dev); | 1077 | struct cxl *adapter = to_cxl_adapter(dev); |
@@ -1101,33 +1080,10 @@ static void cxl_release_adapter(struct device *dev) | |||
1101 | 1080 | ||
1102 | cxl_remove_adapter_nr(adapter); | 1081 | cxl_remove_adapter_nr(adapter); |
1103 | 1082 | ||
1083 | kfree(adapter->native); | ||
1104 | kfree(adapter); | 1084 | kfree(adapter); |
1105 | } | 1085 | } |
1106 | 1086 | ||
1107 | static struct cxl *cxl_alloc_adapter(void) | ||
1108 | { | ||
1109 | struct cxl *adapter; | ||
1110 | |||
1111 | if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL))) | ||
1112 | return NULL; | ||
1113 | |||
1114 | spin_lock_init(&adapter->afu_list_lock); | ||
1115 | |||
1116 | if (cxl_alloc_adapter_nr(adapter)) | ||
1117 | goto err1; | ||
1118 | |||
1119 | if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) | ||
1120 | goto err2; | ||
1121 | |||
1122 | return adapter; | ||
1123 | |||
1124 | err2: | ||
1125 | cxl_remove_adapter_nr(adapter); | ||
1126 | err1: | ||
1127 | kfree(adapter); | ||
1128 | return NULL; | ||
1129 | } | ||
1130 | |||
1131 | #define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31)) | 1087 | #define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31)) |
1132 | 1088 | ||
1133 | static int sanitise_adapter_regs(struct cxl *adapter) | 1089 | static int sanitise_adapter_regs(struct cxl *adapter) |
@@ -1191,7 +1147,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) | |||
1191 | if ((rc = cxl_setup_psl_timebase(adapter, dev))) | 1147 | if ((rc = cxl_setup_psl_timebase(adapter, dev))) |
1192 | goto err; | 1148 | goto err; |
1193 | 1149 | ||
1194 | if ((rc = cxl_register_psl_err_irq(adapter))) | 1150 | if ((rc = cxl_native_register_psl_err_irq(adapter))) |
1195 | goto err; | 1151 | goto err; |
1196 | 1152 | ||
1197 | return 0; | 1153 | return 0; |
@@ -1206,13 +1162,13 @@ static void cxl_deconfigure_adapter(struct cxl *adapter) | |||
1206 | { | 1162 | { |
1207 | struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); | 1163 | struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); |
1208 | 1164 | ||
1209 | cxl_release_psl_err_irq(adapter); | 1165 | cxl_native_release_psl_err_irq(adapter); |
1210 | cxl_unmap_adapter_regs(adapter); | 1166 | cxl_unmap_adapter_regs(adapter); |
1211 | 1167 | ||
1212 | pci_disable_device(pdev); | 1168 | pci_disable_device(pdev); |
1213 | } | 1169 | } |
1214 | 1170 | ||
1215 | static struct cxl *cxl_init_adapter(struct pci_dev *dev) | 1171 | static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) |
1216 | { | 1172 | { |
1217 | struct cxl *adapter; | 1173 | struct cxl *adapter; |
1218 | int rc; | 1174 | int rc; |
@@ -1221,6 +1177,12 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev) | |||
1221 | if (!adapter) | 1177 | if (!adapter) |
1222 | return ERR_PTR(-ENOMEM); | 1178 | return ERR_PTR(-ENOMEM); |
1223 | 1179 | ||
1180 | adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL); | ||
1181 | if (!adapter->native) { | ||
1182 | rc = -ENOMEM; | ||
1183 | goto err_release; | ||
1184 | } | ||
1185 | |||
1224 | /* Set defaults for parameters which need to persist over | 1186 | /* Set defaults for parameters which need to persist over |
1225 | * configure/reconfigure | 1187 | * configure/reconfigure |
1226 | */ | 1188 | */ |
@@ -1230,8 +1192,7 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev) | |||
1230 | rc = cxl_configure_adapter(adapter, dev); | 1192 | rc = cxl_configure_adapter(adapter, dev); |
1231 | if (rc) { | 1193 | if (rc) { |
1232 | pci_disable_device(dev); | 1194 | pci_disable_device(dev); |
1233 | cxl_release_adapter(&adapter->dev); | 1195 | goto err_release; |
1234 | return ERR_PTR(rc); | ||
1235 | } | 1196 | } |
1236 | 1197 | ||
1237 | /* Don't care if this one fails: */ | 1198 | /* Don't care if this one fails: */ |
@@ -1257,9 +1218,13 @@ err_put1: | |||
1257 | cxl_deconfigure_adapter(adapter); | 1218 | cxl_deconfigure_adapter(adapter); |
1258 | device_unregister(&adapter->dev); | 1219 | device_unregister(&adapter->dev); |
1259 | return ERR_PTR(rc); | 1220 | return ERR_PTR(rc); |
1221 | |||
1222 | err_release: | ||
1223 | cxl_release_adapter(&adapter->dev); | ||
1224 | return ERR_PTR(rc); | ||
1260 | } | 1225 | } |
1261 | 1226 | ||
1262 | static void cxl_remove_adapter(struct cxl *adapter) | 1227 | static void cxl_pci_remove_adapter(struct cxl *adapter) |
1263 | { | 1228 | { |
1264 | pr_devel("cxl_remove_adapter\n"); | 1229 | pr_devel("cxl_remove_adapter\n"); |
1265 | 1230 | ||
@@ -1277,17 +1242,22 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1277 | int slice; | 1242 | int slice; |
1278 | int rc; | 1243 | int rc; |
1279 | 1244 | ||
1245 | if (cxl_pci_is_vphb_device(dev)) { | ||
1246 | dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n"); | ||
1247 | return -ENODEV; | ||
1248 | } | ||
1249 | |||
1280 | if (cxl_verbose) | 1250 | if (cxl_verbose) |
1281 | dump_cxl_config_space(dev); | 1251 | dump_cxl_config_space(dev); |
1282 | 1252 | ||
1283 | adapter = cxl_init_adapter(dev); | 1253 | adapter = cxl_pci_init_adapter(dev); |
1284 | if (IS_ERR(adapter)) { | 1254 | if (IS_ERR(adapter)) { |
1285 | dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter)); | 1255 | dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter)); |
1286 | return PTR_ERR(adapter); | 1256 | return PTR_ERR(adapter); |
1287 | } | 1257 | } |
1288 | 1258 | ||
1289 | for (slice = 0; slice < adapter->slices; slice++) { | 1259 | for (slice = 0; slice < adapter->slices; slice++) { |
1290 | if ((rc = cxl_init_afu(adapter, slice, dev))) { | 1260 | if ((rc = pci_init_afu(adapter, slice, dev))) { |
1291 | dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc); | 1261 | dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc); |
1292 | continue; | 1262 | continue; |
1293 | } | 1263 | } |
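The cxl_pci_is_vphb_device() guard added to cxl_probe() keeps the driver from binding to functions it exposes on its own virtual PHB, whose IDs can match the probe table. A plausible implementation, inferred from how vphb.c builds the bus (treat the ops comparison as an assumption):

	bool cxl_pci_is_vphb_device(struct pci_dev *dev)
	{
		struct pci_controller *phb = pci_bus_to_host(dev->bus);

		/* devices on a cxl vPHB are served by cxl_pcie_pci_ops */
		return phb->ops == &cxl_pcie_pci_ops;
	}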
@@ -1312,10 +1282,9 @@ static void cxl_remove(struct pci_dev *dev) | |||
1312 | */ | 1282 | */ |
1313 | for (i = 0; i < adapter->slices; i++) { | 1283 | for (i = 0; i < adapter->slices; i++) { |
1314 | afu = adapter->afu[i]; | 1284 | afu = adapter->afu[i]; |
1315 | cxl_pci_vphb_remove(afu); | 1285 | cxl_pci_remove_afu(afu); |
1316 | cxl_remove_afu(afu); | ||
1317 | } | 1286 | } |
1318 | cxl_remove_adapter(adapter); | 1287 | cxl_pci_remove_adapter(adapter); |
1319 | } | 1288 | } |
1320 | 1289 | ||
1321 | static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, | 1290 | static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, |
@@ -1461,8 +1430,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, | |||
1461 | return result; | 1430 | return result; |
1462 | 1431 | ||
1463 | cxl_context_detach_all(afu); | 1432 | cxl_context_detach_all(afu); |
1464 | cxl_afu_deactivate_mode(afu); | 1433 | cxl_ops->afu_deactivate_mode(afu, afu->current_mode); |
1465 | cxl_deconfigure_afu(afu); | 1434 | pci_deconfigure_afu(afu); |
1466 | } | 1435 | } |
1467 | cxl_deconfigure_adapter(adapter); | 1436 | cxl_deconfigure_adapter(adapter); |
1468 | 1437 | ||
@@ -1485,14 +1454,12 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) | |||
1485 | for (i = 0; i < adapter->slices; i++) { | 1454 | for (i = 0; i < adapter->slices; i++) { |
1486 | afu = adapter->afu[i]; | 1455 | afu = adapter->afu[i]; |
1487 | 1456 | ||
1488 | if (cxl_configure_afu(afu, adapter, pdev)) | 1457 | if (pci_configure_afu(afu, adapter, pdev)) |
1489 | goto err; | 1458 | goto err; |
1490 | 1459 | ||
1491 | if (cxl_afu_select_best_mode(afu)) | 1460 | if (cxl_afu_select_best_mode(afu)) |
1492 | goto err; | 1461 | goto err; |
1493 | 1462 | ||
1494 | cxl_pci_vphb_reconfigure(afu); | ||
1495 | |||
1496 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { | 1463 | list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { |
1497 | /* Reset the device context. | 1464 | /* Reset the device context. |
1498 | * TODO: make this less disruptive | 1465 | * TODO: make this less disruptive |
@@ -1508,7 +1475,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) | |||
1508 | 1475 | ||
1509 | afu_dev->dev.archdata.cxl_ctx = ctx; | 1476 | afu_dev->dev.archdata.cxl_ctx = ctx; |
1510 | 1477 | ||
1511 | if (cxl_afu_check_and_enable(afu)) | 1478 | if (cxl_ops->afu_check_and_enable(afu)) |
1512 | goto err; | 1479 | goto err; |
1513 | 1480 | ||
1514 | afu_dev->error_state = pci_channel_io_normal; | 1481 | afu_dev->error_state = pci_channel_io_normal; |
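Taken together, the EEH hooks in this file now tear down and rebuild the card through the same backend-agnostic entry points used everywhere else. A condensed view of the recovery flow (simplified sketch; error handling and the per-device loops are omitted):

	/* error_detected: quiesce */
	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	pci_deconfigure_afu(afu);
	cxl_deconfigure_adapter(adapter);

	/* slot_reset: rebuild */
	cxl_configure_adapter(adapter, pdev);
	pci_configure_afu(afu, adapter, pdev);
	cxl_afu_select_best_mode(afu);
	cxl_ops->afu_check_and_enable(afu);	/* then re-attach contexts */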
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 038af5d45145..25913c08794c 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -69,7 +69,7 @@ static ssize_t reset_adapter_store(struct device *device, | |||
69 | if ((rc != 1) || (val != 1)) | 69 | if ((rc != 1) || (val != 1)) |
70 | return -EINVAL; | 70 | return -EINVAL; |
71 | 71 | ||
72 | if ((rc = cxl_reset(adapter))) | 72 | if ((rc = cxl_ops->adapter_reset(adapter))) |
73 | return rc; | 73 | return rc; |
74 | return count; | 74 | return count; |
75 | } | 75 | } |
@@ -165,7 +165,7 @@ static ssize_t pp_mmio_off_show(struct device *device, | |||
165 | { | 165 | { |
166 | struct cxl_afu *afu = to_afu_chardev_m(device); | 166 | struct cxl_afu *afu = to_afu_chardev_m(device); |
167 | 167 | ||
168 | return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset); | 168 | return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset); |
169 | } | 169 | } |
170 | 170 | ||
171 | static ssize_t pp_mmio_len_show(struct device *device, | 171 | static ssize_t pp_mmio_len_show(struct device *device, |
@@ -211,7 +211,7 @@ static ssize_t reset_store_afu(struct device *device, | |||
211 | goto err; | 211 | goto err; |
212 | } | 212 | } |
213 | 213 | ||
214 | if ((rc = __cxl_afu_reset(afu))) | 214 | if ((rc = cxl_ops->afu_reset(afu))) |
215 | goto err; | 215 | goto err; |
216 | 216 | ||
217 | rc = count; | 217 | rc = count; |
@@ -253,8 +253,14 @@ static ssize_t irqs_max_store(struct device *device, | |||
253 | if (irqs_max < afu->pp_irqs) | 253 | if (irqs_max < afu->pp_irqs) |
254 | return -EINVAL; | 254 | return -EINVAL; |
255 | 255 | ||
256 | if (irqs_max > afu->adapter->user_irqs) | 256 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
257 | return -EINVAL; | 257 | if (irqs_max > afu->adapter->user_irqs) |
258 | return -EINVAL; | ||
259 | } else { | ||
260 | /* pHyp sets a per-AFU limit */ | ||
261 | if (irqs_max > afu->guest->max_ints) | ||
262 | return -EINVAL; | ||
263 | } | ||
258 | 264 | ||
259 | afu->irqs_max = irqs_max; | 265 | afu->irqs_max = irqs_max; |
260 | return count; | 266 | return count; |
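The branch added above keys off cpu_has_feature(CPU_FTR_HVMODE), which is true when the kernel itself runs in hypervisor mode (bare metal) and false under PowerVM, where pHyp imposes a per-AFU interrupt ceiling. Equivalently (illustrative restatement of the same check):

	int limit = cpu_has_feature(CPU_FTR_HVMODE)
			? afu->adapter->user_irqs	/* global pool */
			: afu->guest->max_ints;		/* pHyp per-AFU cap */

	if (irqs_max > limit)
		return -EINVAL;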
@@ -348,7 +354,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr, | |||
348 | } | 354 | } |
349 | 355 | ||
350 | /* | 356 | /* |
351 | * cxl_afu_deactivate_mode needs to be done outside the lock, prevent | 357 | * afu_deactivate_mode needs to be done outside the lock, prevent |
352 | * other contexts coming in before we are ready: | 358 | * other contexts coming in before we are ready: |
353 | */ | 359 | */ |
354 | old_mode = afu->current_mode; | 360 | old_mode = afu->current_mode; |
@@ -357,9 +363,9 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr, | |||
357 | 363 | ||
358 | mutex_unlock(&afu->contexts_lock); | 364 | mutex_unlock(&afu->contexts_lock); |
359 | 365 | ||
360 | if ((rc = _cxl_afu_deactivate_mode(afu, old_mode))) | 366 | if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode))) |
361 | return rc; | 367 | return rc; |
362 | if ((rc = cxl_afu_activate_mode(afu, mode))) | 368 | if ((rc = cxl_ops->afu_activate_mode(afu, mode))) |
363 | return rc; | 369 | return rc; |
364 | 370 | ||
365 | return count; | 371 | return count; |
@@ -388,7 +394,7 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj, | |||
388 | { | 394 | { |
389 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj)); | 395 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj)); |
390 | 396 | ||
391 | return cxl_afu_read_err_buffer(afu, buf, off, count); | 397 | return cxl_ops->afu_read_err_buffer(afu, buf, off, count); |
392 | } | 398 | } |
393 | 399 | ||
394 | static struct device_attribute afu_attrs[] = { | 400 | static struct device_attribute afu_attrs[] = { |
@@ -405,24 +411,39 @@ static struct device_attribute afu_attrs[] = { | |||
405 | 411 | ||
406 | int cxl_sysfs_adapter_add(struct cxl *adapter) | 412 | int cxl_sysfs_adapter_add(struct cxl *adapter) |
407 | { | 413 | { |
414 | struct device_attribute *dev_attr; | ||
408 | int i, rc; | 415 | int i, rc; |
409 | 416 | ||
410 | for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { | 417 | for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { |
411 | if ((rc = device_create_file(&adapter->dev, &adapter_attrs[i]))) | 418 | dev_attr = &adapter_attrs[i]; |
412 | goto err; | 419 | if (cxl_ops->support_attributes(dev_attr->attr.name, |
420 | CXL_ADAPTER_ATTRS)) { | ||
421 | if ((rc = device_create_file(&adapter->dev, dev_attr))) | ||
422 | goto err; | ||
423 | } | ||
413 | } | 424 | } |
414 | return 0; | 425 | return 0; |
415 | err: | 426 | err: |
416 | for (i--; i >= 0; i--) | 427 | for (i--; i >= 0; i--) { |
417 | device_remove_file(&adapter->dev, &adapter_attrs[i]); | 428 | dev_attr = &adapter_attrs[i]; |
429 | if (cxl_ops->support_attributes(dev_attr->attr.name, | ||
430 | CXL_ADAPTER_ATTRS)) | ||
431 | device_remove_file(&adapter->dev, dev_attr); | ||
432 | } | ||
418 | return rc; | 433 | return rc; |
419 | } | 434 | } |
435 | |||
420 | void cxl_sysfs_adapter_remove(struct cxl *adapter) | 436 | void cxl_sysfs_adapter_remove(struct cxl *adapter) |
421 | { | 437 | { |
438 | struct device_attribute *dev_attr; | ||
422 | int i; | 439 | int i; |
423 | 440 | ||
424 | for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) | 441 | for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { |
425 | device_remove_file(&adapter->dev, &adapter_attrs[i]); | 442 | dev_attr = &adapter_attrs[i]; |
443 | if (cxl_ops->support_attributes(dev_attr->attr.name, | ||
444 | CXL_ADAPTER_ATTRS)) | ||
445 | device_remove_file(&adapter->dev, dev_attr); | ||
446 | } | ||
426 | } | 447 | } |
427 | 448 | ||
428 | struct afu_config_record { | 449 | struct afu_config_record { |
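Each create/remove loop in this file now consults cxl_ops->support_attributes(), letting a backend opt out of attributes that make no sense for it; a guest, for example, has no direct access to the adapter flash. A hedged sketch of a guest-side hook (the attribute name checked here is an assumption):

	static bool guest_support_attributes(const char *attr_name,
					     enum cxl_attrs type)
	{
		switch (type) {
		case CXL_ADAPTER_ATTRS:
			/* assumed example: no flash access under a hypervisor */
			if (!strcmp(attr_name, "load_image_on_perst"))
				return false;
			break;
		default:
			break;
		}
		return true;
	}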
@@ -468,10 +489,12 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj, | |||
468 | struct afu_config_record *cr = to_cr(kobj); | 489 | struct afu_config_record *cr = to_cr(kobj); |
469 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent)); | 490 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent)); |
470 | 491 | ||
471 | u64 i, j, val; | 492 | u64 i, j, val, rc; |
472 | 493 | ||
473 | for (i = 0; i < count;) { | 494 | for (i = 0; i < count;) { |
474 | val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7); | 495 | rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val); |
496 | if (rc) | ||
497 | val = ~0ULL; | ||
475 | for (j = off & 0x7; j < 8 && i < count; i++, j++, off++) | 498 | for (j = off & 0x7; j < 8 && i < count; i++, j++, off++) |
476 | buf[i] = (val >> (j * 8)) & 0xff; | 499 | buf[i] = (val >> (j * 8)) & 0xff; |
477 | } | 500 | } |
@@ -516,14 +539,22 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c | |||
516 | return ERR_PTR(-ENOMEM); | 539 | return ERR_PTR(-ENOMEM); |
517 | 540 | ||
518 | cr->cr = cr_idx; | 541 | cr->cr = cr_idx; |
519 | cr->device = cxl_afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID); | 542 | |
520 | cr->vendor = cxl_afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID); | 543 | rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device); |
521 | cr->class = cxl_afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION) >> 8; | 544 | if (rc) |
545 | goto err; | ||
546 | rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor); | ||
547 | if (rc) | ||
548 | goto err; | ||
549 | rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class); | ||
550 | if (rc) | ||
551 | goto err; | ||
552 | cr->class >>= 8; | ||
522 | 553 | ||
523 | /* | 554 | /* |
524 | * Export raw AFU PCIe like config record. For now this is read only by | 555 | * Export raw AFU PCIe like config record. For now this is read only by |
525 | * root - we can expand that later to be readable by non-root and maybe | 556 | * root - we can expand that later to be readable by non-root and maybe |
526 | * even writable provided we have a good use-case. Once we suport | 557 | * even writable provided we have a good use-case. Once we support |
527 | * exposing AFUs through a virtual PHB they will get that for free from | 558 | * exposing AFUs through a virtual PHB they will get that for free from |
528 | * Linux' PCI infrastructure, but until then it's not clear that we | 559 | * Linux' PCI infrastructure, but until then it's not clear that we |
529 | * need it for anything since the main use case is just identifying | 560 | * need it for anything since the main use case is just identifying |
@@ -561,6 +592,7 @@ err: | |||
561 | 592 | ||
562 | void cxl_sysfs_afu_remove(struct cxl_afu *afu) | 593 | void cxl_sysfs_afu_remove(struct cxl_afu *afu) |
563 | { | 594 | { |
595 | struct device_attribute *dev_attr; | ||
564 | struct afu_config_record *cr, *tmp; | 596 | struct afu_config_record *cr, *tmp; |
565 | int i; | 597 | int i; |
566 | 598 | ||
@@ -568,8 +600,12 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu) | |||
568 | if (afu->eb_len) | 600 | if (afu->eb_len) |
569 | device_remove_bin_file(&afu->dev, &afu->attr_eb); | 601 | device_remove_bin_file(&afu->dev, &afu->attr_eb); |
570 | 602 | ||
571 | for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) | 603 | for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { |
572 | device_remove_file(&afu->dev, &afu_attrs[i]); | 604 | dev_attr = &afu_attrs[i]; |
605 | if (cxl_ops->support_attributes(dev_attr->attr.name, | ||
606 | CXL_AFU_ATTRS)) | ||
607 | device_remove_file(&afu->dev, &afu_attrs[i]); | ||
608 | } | ||
573 | 609 | ||
574 | list_for_each_entry_safe(cr, tmp, &afu->crs, list) { | 610 | list_for_each_entry_safe(cr, tmp, &afu->crs, list) { |
575 | sysfs_remove_bin_file(&cr->kobj, &cr->config_attr); | 611 | sysfs_remove_bin_file(&cr->kobj, &cr->config_attr); |
@@ -579,14 +615,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu) | |||
579 | 615 | ||
580 | int cxl_sysfs_afu_add(struct cxl_afu *afu) | 616 | int cxl_sysfs_afu_add(struct cxl_afu *afu) |
581 | { | 617 | { |
618 | struct device_attribute *dev_attr; | ||
582 | struct afu_config_record *cr; | 619 | struct afu_config_record *cr; |
583 | int i, rc; | 620 | int i, rc; |
584 | 621 | ||
585 | INIT_LIST_HEAD(&afu->crs); | 622 | INIT_LIST_HEAD(&afu->crs); |
586 | 623 | ||
587 | for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { | 624 | for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { |
588 | if ((rc = device_create_file(&afu->dev, &afu_attrs[i]))) | 625 | dev_attr = &afu_attrs[i]; |
589 | goto err; | 626 | if (cxl_ops->support_attributes(dev_attr->attr.name, |
627 | CXL_AFU_ATTRS)) { | ||
628 | if ((rc = device_create_file(&afu->dev, &afu_attrs[i]))) | ||
629 | goto err; | ||
630 | } | ||
590 | } | 631 | } |
591 | 632 | ||
592 | /* conditionally create and add the binary file for the error info buffer */ | 633 | /* conditionally create and add the binary file for the error info buffer */ |
@@ -625,32 +666,50 @@ err: | |||
625 | /* reset the eb_len as we haven't created the bin attr */ | 666 | /* reset the eb_len as we haven't created the bin attr */ |
626 | afu->eb_len = 0; | 667 | afu->eb_len = 0; |
627 | 668 | ||
628 | for (i--; i >= 0; i--) | 669 | for (i--; i >= 0; i--) { |
670 | dev_attr = &afu_attrs[i]; | ||
671 | if (cxl_ops->support_attributes(dev_attr->attr.name, | ||
672 | CXL_AFU_ATTRS)) | ||
629 | device_remove_file(&afu->dev, &afu_attrs[i]); | 673 | device_remove_file(&afu->dev, &afu_attrs[i]); |
674 | } | ||
630 | return rc; | 675 | return rc; |
631 | } | 676 | } |
632 | 677 | ||
633 | int cxl_sysfs_afu_m_add(struct cxl_afu *afu) | 678 | int cxl_sysfs_afu_m_add(struct cxl_afu *afu) |
634 | { | 679 | { |
680 | struct device_attribute *dev_attr; | ||
635 | int i, rc; | 681 | int i, rc; |
636 | 682 | ||
637 | for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { | 683 | for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { |
638 | if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i]))) | 684 | dev_attr = &afu_master_attrs[i]; |
639 | goto err; | 685 | if (cxl_ops->support_attributes(dev_attr->attr.name, |
686 | CXL_AFU_MASTER_ATTRS)) { | ||
687 | if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i]))) | ||
688 | goto err; | ||
689 | } | ||
640 | } | 690 | } |
641 | 691 | ||
642 | return 0; | 692 | return 0; |
643 | 693 | ||
644 | err: | 694 | err: |
645 | for (i--; i >= 0; i--) | 695 | for (i--; i >= 0; i--) { |
646 | device_remove_file(afu->chardev_m, &afu_master_attrs[i]); | 696 | dev_attr = &afu_master_attrs[i]; |
697 | if (cxl_ops->support_attributes(dev_attr->attr.name, | ||
698 | CXL_AFU_MASTER_ATTRS)) | ||
699 | device_remove_file(afu->chardev_m, &afu_master_attrs[i]); | ||
700 | } | ||
647 | return rc; | 701 | return rc; |
648 | } | 702 | } |
649 | 703 | ||
650 | void cxl_sysfs_afu_m_remove(struct cxl_afu *afu) | 704 | void cxl_sysfs_afu_m_remove(struct cxl_afu *afu) |
651 | { | 705 | { |
706 | struct device_attribute *dev_attr; | ||
652 | int i; | 707 | int i; |
653 | 708 | ||
654 | for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) | 709 | for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { |
655 | device_remove_file(afu->chardev_m, &afu_master_attrs[i]); | 710 | dev_attr = &afu_master_attrs[i]; |
711 | if (cxl_ops->support_attributes(dev_attr->attr.name, | ||
712 | CXL_AFU_MASTER_ATTRS)) | ||
713 | device_remove_file(afu->chardev_m, &afu_master_attrs[i]); | ||
714 | } | ||
656 | } | 715 | } |
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index 6e1e2adfba8e..751d6119683e 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -450,6 +450,199 @@ DEFINE_EVENT(cxl_pe_class, cxl_slbia, | |||
450 | TP_ARGS(ctx) | 450 | TP_ARGS(ctx) |
451 | ); | 451 | ); |
452 | 452 | ||
453 | TRACE_EVENT(cxl_hcall, | ||
454 | TP_PROTO(u64 unit_address, u64 process_token, long rc), | ||
455 | |||
456 | TP_ARGS(unit_address, process_token, rc), | ||
457 | |||
458 | TP_STRUCT__entry( | ||
459 | __field(u64, unit_address) | ||
460 | __field(u64, process_token) | ||
461 | __field(long, rc) | ||
462 | ), | ||
463 | |||
464 | TP_fast_assign( | ||
465 | __entry->unit_address = unit_address; | ||
466 | __entry->process_token = process_token; | ||
467 | __entry->rc = rc; | ||
468 | ), | ||
469 | |||
470 | TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li", | ||
471 | __entry->unit_address, | ||
472 | __entry->process_token, | ||
473 | __entry->rc | ||
474 | ) | ||
475 | ); | ||
476 | |||
477 | TRACE_EVENT(cxl_hcall_control, | ||
478 | TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, | ||
479 | u64 p4, unsigned long r4, long rc), | ||
480 | |||
481 | TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc), | ||
482 | |||
483 | TP_STRUCT__entry( | ||
484 | __field(u64, unit_address) | ||
485 | __field(char *, fct) | ||
486 | __field(u64, p1) | ||
487 | __field(u64, p2) | ||
488 | __field(u64, p3) | ||
489 | __field(u64, p4) | ||
490 | __field(unsigned long, r4) | ||
491 | __field(long, rc) | ||
492 | ), | ||
493 | |||
494 | TP_fast_assign( | ||
495 | __entry->unit_address = unit_address; | ||
496 | __entry->fct = fct; | ||
497 | __entry->p1 = p1; | ||
498 | __entry->p2 = p2; | ||
499 | __entry->p3 = p3; | ||
500 | __entry->p4 = p4; | ||
501 | __entry->r4 = r4; | ||
502 | __entry->rc = rc; | ||
503 | ), | ||
504 | |||
505 | TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li", | ||
506 | __entry->unit_address, | ||
507 | __entry->fct, | ||
508 | __entry->p1, | ||
509 | __entry->p2, | ||
510 | __entry->p3, | ||
511 | __entry->p4, | ||
512 | __entry->r4, | ||
513 | __entry->rc | ||
514 | ) | ||
515 | ); | ||
516 | |||
517 | TRACE_EVENT(cxl_hcall_attach, | ||
518 | TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token, | ||
519 | unsigned long mmio_addr, unsigned long mmio_size, long rc), | ||
520 | |||
521 | TP_ARGS(unit_address, phys_addr, process_token, | ||
522 | mmio_addr, mmio_size, rc), | ||
523 | |||
524 | TP_STRUCT__entry( | ||
525 | __field(u64, unit_address) | ||
526 | __field(u64, phys_addr) | ||
527 | __field(unsigned long, process_token) | ||
528 | __field(unsigned long, mmio_addr) | ||
529 | __field(unsigned long, mmio_size) | ||
530 | __field(long, rc) | ||
531 | ), | ||
532 | |||
533 | TP_fast_assign( | ||
534 | __entry->unit_address = unit_address; | ||
535 | __entry->phys_addr = phys_addr; | ||
536 | __entry->process_token = process_token; | ||
537 | __entry->mmio_addr = mmio_addr; | ||
538 | __entry->mmio_size = mmio_size; | ||
539 | __entry->rc = rc; | ||
540 | ), | ||
541 | |||
542 | TP_printk("unit_address=0x%016llx phys_addr=0x%016llx " | ||
543 | "token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li", | ||
544 | __entry->unit_address, | ||
545 | __entry->phys_addr, | ||
546 | __entry->process_token, | ||
547 | __entry->mmio_addr, | ||
548 | __entry->mmio_size, | ||
549 | __entry->rc | ||
550 | ) | ||
551 | ); | ||
552 | |||
553 | DEFINE_EVENT(cxl_hcall, cxl_hcall_detach, | ||
554 | TP_PROTO(u64 unit_address, u64 process_token, long rc), | ||
555 | TP_ARGS(unit_address, process_token, rc) | ||
556 | ); | ||
557 | |||
558 | DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function, | ||
559 | TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, | ||
560 | u64 p4, unsigned long r4, long rc), | ||
561 | TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc) | ||
562 | ); | ||
563 | |||
564 | DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info, | ||
565 | TP_PROTO(u64 unit_address, u64 process_token, long rc), | ||
566 | TP_ARGS(unit_address, process_token, rc) | ||
567 | ); | ||
568 | |||
569 | TRACE_EVENT(cxl_hcall_control_faults, | ||
570 | TP_PROTO(u64 unit_address, u64 process_token, | ||
571 | u64 control_mask, u64 reset_mask, unsigned long r4, | ||
572 | long rc), | ||
573 | |||
574 | TP_ARGS(unit_address, process_token, | ||
575 | control_mask, reset_mask, r4, rc), | ||
576 | |||
577 | TP_STRUCT__entry( | ||
578 | __field(u64, unit_address) | ||
579 | __field(u64, process_token) | ||
580 | __field(u64, control_mask) | ||
581 | __field(u64, reset_mask) | ||
582 | __field(unsigned long, r4) | ||
583 | __field(long, rc) | ||
584 | ), | ||
585 | |||
586 | TP_fast_assign( | ||
587 | __entry->unit_address = unit_address; | ||
588 | __entry->process_token = process_token; | ||
589 | __entry->control_mask = control_mask; | ||
590 | __entry->reset_mask = reset_mask; | ||
591 | __entry->r4 = r4; | ||
592 | __entry->rc = rc; | ||
593 | ), | ||
594 | |||
595 | TP_printk("unit_address=0x%016llx process_token=0x%llx " | ||
596 | "control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li", | ||
597 | __entry->unit_address, | ||
598 | __entry->process_token, | ||
599 | __entry->control_mask, | ||
600 | __entry->reset_mask, | ||
601 | __entry->r4, | ||
602 | __entry->rc | ||
603 | ) | ||
604 | ); | ||
605 | |||
606 | DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility, | ||
607 | TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3, | ||
608 | u64 p4, unsigned long r4, long rc), | ||
609 | TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc) | ||
610 | ); | ||
611 | |||
612 | TRACE_EVENT(cxl_hcall_download_facility, | ||
613 | TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num, | ||
614 | unsigned long r4, long rc), | ||
615 | |||
616 | TP_ARGS(unit_address, fct, list_address, num, r4, rc), | ||
617 | |||
618 | TP_STRUCT__entry( | ||
619 | __field(u64, unit_address) | ||
620 | __field(char *, fct) | ||
621 | __field(u64, list_address) | ||
622 | __field(u64, num) | ||
623 | __field(unsigned long, r4) | ||
624 | __field(long, rc) | ||
625 | ), | ||
626 | |||
627 | TP_fast_assign( | ||
628 | __entry->unit_address = unit_address; | ||
629 | __entry->fct = fct; | ||
630 | __entry->list_address = list_address; | ||
631 | __entry->num = num; | ||
632 | __entry->r4 = r4; | ||
633 | __entry->rc = rc; | ||
634 | ), | ||
635 | |||
636 | TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li", | ||
637 | __entry->unit_address, | ||
638 | __entry->fct, | ||
639 | __entry->list_address, | ||
640 | __entry->num, | ||
641 | __entry->r4, | ||
642 | __entry->rc | ||
643 | ) | ||
644 | ); | ||
645 | |||
453 | #endif /* _CXL_TRACE_H */ | 646 | #endif /* _CXL_TRACE_H */ |
454 | 647 | ||
455 | /* This part must be outside protection */ | 648 | /* This part must be outside protection */ |
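The events above instrument the guest back-end's hypervisor calls. A typical call site pairs the hcall with its tracepoint, along the lines of this sketch; the plpar_hcall9() wrapper and the H_DETACH_CA_PROCESS opcode are assumptions, and only the tracepoint signature comes from the definitions above:

	long cxl_h_detach_process(u64 unit_address, u64 process_token)
	{
		unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
		long rc;

		rc = plpar_hcall9(H_DETACH_CA_PROCESS, retbuf,
				  unit_address, process_token);
		trace_cxl_hcall_detach(unit_address, process_token, rc);
		return rc;
	}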
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index cbd4331fb45c..cdc7723b845d 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -49,7 +49,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) | |||
49 | phb = pci_bus_to_host(dev->bus); | 49 | phb = pci_bus_to_host(dev->bus); |
50 | afu = (struct cxl_afu *)phb->private_data; | 50 | afu = (struct cxl_afu *)phb->private_data; |
51 | 51 | ||
52 | if (!cxl_adapter_link_ok(afu->adapter)) { | 52 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
53 | dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__); | 53 | dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__); |
54 | return false; | 54 | return false; |
55 | } | 55 | } |
@@ -66,7 +66,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) | |||
66 | return false; | 66 | return false; |
67 | dev->dev.archdata.cxl_ctx = ctx; | 67 | dev->dev.archdata.cxl_ctx = ctx; |
68 | 68 | ||
69 | return (cxl_afu_check_and_enable(afu) == 0); | 69 | return (cxl_ops->afu_check_and_enable(afu) == 0); |
70 | } | 70 | } |
71 | 71 | ||
 static void cxl_pci_disable_device(struct pci_dev *dev)
@@ -99,113 +99,90 @@ static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
         return (bus << 8) + devfn;
 }
 
-static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
-                                       u8 bus, u8 devfn, int offset)
-{
-        int record = cxl_pcie_cfg_record(bus, devfn);
-
-        return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
-}
-
-
 static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
-                                int offset, int len,
-                                volatile void __iomem **ioaddr,
-                                u32 *mask, int *shift)
+                                struct cxl_afu **_afu, int *_record)
 {
         struct pci_controller *phb;
         struct cxl_afu *afu;
-        unsigned long addr;
+        int record;
 
         phb = pci_bus_to_host(bus);
         if (phb == NULL)
                 return PCIBIOS_DEVICE_NOT_FOUND;
-        afu = (struct cxl_afu *)phb->private_data;
 
-        if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
+        afu = (struct cxl_afu *)phb->private_data;
+        record = cxl_pcie_cfg_record(bus->number, devfn);
+        if (record > afu->crs_num)
                 return PCIBIOS_DEVICE_NOT_FOUND;
-        if (offset >= (unsigned long)phb->cfg_data)
-                return PCIBIOS_BAD_REGISTER_NUMBER;
-        addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);
 
-        *ioaddr = (void *)(addr & ~0x3ULL);
-        *shift = ((addr & 0x3) * 8);
-        switch (len) {
-        case 1:
-                *mask = 0xff;
-                break;
-        case 2:
-                *mask = 0xffff;
-                break;
-        default:
-                *mask = 0xffffffff;
-                break;
-        }
+        *_afu = afu;
+        *_record = record;
         return 0;
 }
 
-
-static inline bool cxl_config_link_ok(struct pci_bus *bus)
-{
-        struct pci_controller *phb;
-        struct cxl_afu *afu;
-
-        /* Config space IO is based on phb->cfg_addr, which is based on
-         * afu_desc_mmio. This isn't safe to read/write when the link
-         * goes down, as EEH tears down MMIO space.
-         *
-         * Check if the link is OK before proceeding.
-         */
-
-        phb = pci_bus_to_host(bus);
-        if (phb == NULL)
-                return false;
-        afu = (struct cxl_afu *)phb->private_data;
-        return cxl_adapter_link_ok(afu->adapter);
-}
-
 static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
                                 int offset, int len, u32 *val)
 {
-        volatile void __iomem *ioaddr;
-        int shift, rc;
-        u32 mask;
+        int rc, record;
+        struct cxl_afu *afu;
+        u8 val8;
+        u16 val16;
+        u32 val32;
 
-        rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
-                                  &mask, &shift);
+        rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
         if (rc)
                 return rc;
 
-        if (!cxl_config_link_ok(bus))
+        switch (len) {
+        case 1:
+                rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
+                *val = val8;
+                break;
+        case 2:
+                rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
+                *val = val16;
+                break;
+        case 4:
+                rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
+                *val = val32;
+                break;
+        default:
+                WARN_ON(1);
+        }
+
+        if (rc)
                 return PCIBIOS_DEVICE_NOT_FOUND;
 
-        /* Can only read 32 bits */
-        *val = (in_le32(ioaddr) >> shift) & mask;
         return PCIBIOS_SUCCESSFUL;
 }
 
 static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
                                 int offset, int len, u32 val)
 {
-        volatile void __iomem *ioaddr;
-        u32 v, mask;
-        int shift, rc;
+        int rc, record;
+        struct cxl_afu *afu;
 
-        rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
-                                  &mask, &shift);
+        rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
         if (rc)
                 return rc;
 
-        if (!cxl_config_link_ok(bus))
-                return PCIBIOS_DEVICE_NOT_FOUND;
-
-        /* Can only write 32 bits so do read-modify-write */
-        mask <<= shift;
-        val <<= shift;
+        switch (len) {
+        case 1:
+                rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
+                break;
+        case 2:
+                rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
+                break;
+        case 4:
+                rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
+                break;
+        default:
+                WARN_ON(1);
+        }
 
-        v = (in_le32(ioaddr) & ~mask) | (val & mask);
+        if (rc)
+                return PCIBIOS_SET_FAILED;
 
-        out_le32(ioaddr, v);
         return PCIBIOS_SUCCESSFUL;
 }
 
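The hunk above stops poking config space through a raw little-endian MMIO window (in_le32/out_le32 plus mask-and-shift) and instead routes every access through the cxl_ops backend table, switched on access width, so the same vPHB code can sit on top of either the bare-metal or the guest backend. Below is a minimal, self-contained sketch of that dispatch-by-width pattern; the names (cr_ops, mem_backend, demo_read_config) and the in-memory backend are hypothetical stand-ins for illustration, not kernel API.

/* Standalone sketch (not kernel code) of the dispatch pattern above:
 * config-space accesses go through a backend ops table and are switched
 * on access width, instead of doing MMIO directly.
 */
#include <stdint.h>
#include <stdio.h>

struct cr_ops {
        int (*read8)(void *ctx, int offset, uint8_t *out);
        int (*read16)(void *ctx, int offset, uint16_t *out);
        int (*read32)(void *ctx, int offset, uint32_t *out);
};

/* A trivial in-memory backend standing in for bare-metal or guest code. */
static uint8_t cfg_space[256] = { 0x34, 0x12, 0xcd, 0xab };

static int mem_read8(void *ctx, int off, uint8_t *out)
{ *out = cfg_space[off]; return 0; }
static int mem_read16(void *ctx, int off, uint16_t *out)
{ *out = (uint16_t)(cfg_space[off] | cfg_space[off + 1] << 8); return 0; }
static int mem_read32(void *ctx, int off, uint32_t *out)
{
        uint16_t lo, hi;

        mem_read16(ctx, off, &lo);
        mem_read16(ctx, off + 2, &hi);
        *out = lo | (uint32_t)hi << 16;
        return 0;
}

static const struct cr_ops mem_backend = { mem_read8, mem_read16, mem_read32 };

/* Mirrors cxl_pcie_read_config: widen the result into a u32 for the caller. */
static int demo_read_config(const struct cr_ops *ops, int offset, int len,
                            uint32_t *val)
{
        uint8_t v8; uint16_t v16; uint32_t v32;
        int rc = -1;

        switch (len) {
        case 1: rc = ops->read8(NULL, offset, &v8);   *val = v8;  break;
        case 2: rc = ops->read16(NULL, offset, &v16); *val = v16; break;
        case 4: rc = ops->read32(NULL, offset, &v32); *val = v32; break;
        }
        return rc;
}

int main(void)
{
        uint32_t val;

        demo_read_config(&mem_backend, 0, 2, &val);
        printf("0x%04x\n", val); /* prints 0x1234 */
        return 0;
}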
@@ -233,23 +210,31 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
 {
         struct pci_dev *phys_dev;
         struct pci_controller *phb, *phys_phb;
-
-        phys_dev = to_pci_dev(afu->adapter->dev.parent);
-        phys_phb = pci_bus_to_host(phys_dev->bus);
+        struct device_node *vphb_dn;
+        struct device *parent;
+
+        if (cpu_has_feature(CPU_FTR_HVMODE)) {
+                phys_dev = to_pci_dev(afu->adapter->dev.parent);
+                phys_phb = pci_bus_to_host(phys_dev->bus);
+                vphb_dn = phys_phb->dn;
+                parent = &phys_dev->dev;
+        } else {
+                vphb_dn = afu->adapter->dev.parent->of_node;
+                parent = afu->adapter->dev.parent;
+        }
 
         /* Alloc and setup PHB data structure */
-        phb = pcibios_alloc_controller(phys_phb->dn);
-
+        phb = pcibios_alloc_controller(vphb_dn);
         if (!phb)
                 return -ENODEV;
 
         /* Setup parent in sysfs */
-        phb->parent = &phys_dev->dev;
+        phb->parent = parent;
 
         /* Setup the PHB using arch provided callback */
         phb->ops = &cxl_pcie_pci_ops;
-        phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
-        phb->cfg_data = (void *)(u64)afu->crs_len;
+        phb->cfg_addr = NULL;
+        phb->cfg_data = 0;
         phb->private_data = afu;
         phb->controller_ops = cxl_pci_controller_ops;
 
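In the hunk above, cxl_pci_vphb_add() now decides at runtime where to anchor the virtual PHB: with CPU_FTR_HVMODE (bare metal) it hangs off the physical CAPI function's PHB node and device, otherwise (guest) off the adapter's parent platform device; cfg_addr/cfg_data can stay empty because config accesses no longer go through an MMIO window. A simplified stand-alone sketch of that feature-gated selection follows; hv_mode, the struct layouts and pick_vphb_anchor() are hypothetical stand-ins.

/* Standalone sketch (not kernel code) of the runtime split above.
 * hv_mode stands in for cpu_has_feature(CPU_FTR_HVMODE).
 */
#include <stdbool.h>
#include <stdio.h>

struct dt_node { const char *path; };
struct device  { const char *name; struct dt_node *of_node; };

static struct dt_node phys_phb_node = { "phys-phb-node" };
static struct dt_node guest_node    = { "guest-adapter-node" };
static struct device  phys_dev      = { "physical-pci-fn", &phys_phb_node };
static struct device  platform_dev  = { "adapter-platform-dev", &guest_node };

static bool hv_mode = true;

/* Choose the vPHB's device-tree anchor and sysfs parent once, up front,
 * so the rest of the PHB setup is shared between both environments.
 */
static void pick_vphb_anchor(struct dt_node **dn, struct device **parent)
{
        if (hv_mode) {
                /* Bare metal: hang the vPHB off the physical PCI function. */
                *dn = phys_dev.of_node;
                *parent = &phys_dev;
        } else {
                /* Guest: the adapter's platform device is the anchor. */
                *dn = platform_dev.of_node;
                *parent = &platform_dev;
        }
}

int main(void)
{
        struct dt_node *dn;
        struct device *parent;

        pick_vphb_anchor(&dn, &parent);
        printf("node=%s parent=%s\n", dn->path, parent->name);
        return 0;
}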
@@ -272,15 +257,6 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
         return 0;
 }
 
-void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
-{
-        /* When we are reconfigured, the AFU's MMIO space is unmapped
-         * and remapped. We need to reflect this in the PHB's view of
-         * the world.
-         */
-        afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
-}
-
 void cxl_pci_vphb_remove(struct cxl_afu *afu)
 {
         struct pci_controller *phb;
@@ -296,6 +272,15 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
         pcibios_free_controller(phb);
 }
 
+bool cxl_pci_is_vphb_device(struct pci_dev *dev)
+{
+        struct pci_controller *phb;
+
+        phb = pci_bus_to_host(dev->bus);
+
+        return (phb->ops == &cxl_pcie_pci_ops);
+}
+
 struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
 {
         struct pci_controller *phb;
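The new cxl_pci_is_vphb_device() above recognises a cxl virtual PHB by pointer identity alone: a device sits on a vPHB exactly when its host controller carries &cxl_pcie_pci_ops. A short sketch of that idiom follows; all names (host_ops, is_ours, ...) are hypothetical. Note the comparison is on the address of the ops table, not its contents, so two tables with identical members remain distinct identities.

/* Standalone sketch (not kernel code) of identifying bus ownership by
 * comparing function-table pointers.
 */
#include <stdbool.h>
#include <stdio.h>

struct host_ops { int (*read)(int off); };
struct host     { const struct host_ops *ops; };
struct dev      { struct host *host; };

static int our_read(int off) { return off; }

static const struct host_ops our_ops   = { our_read };
static const struct host_ops other_ops = { our_read };

/* Pointer comparison, not content comparison. */
static bool is_ours(const struct dev *d)
{
        return d->host->ops == &our_ops;
}

int main(void)
{
        struct host h1 = { &our_ops }, h2 = { &other_ops };
        struct dev d1 = { &h1 }, d2 = { &h2 };

        printf("%d %d\n", is_ours(&d1), is_ours(&d2)); /* prints 1 0 */
        return 0;
}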