path: root/drivers/misc/cxl/native.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-07-31 00:01:36 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-07-31 00:01:36 -0400
commit    bad60e6f259a01cf9f29a1ef8d435ab6c60b2de9 (patch)
tree      c9aaa8166735659761239c117af2b11b022bc6cb /drivers/misc/cxl/native.c
parent    dd0f0cf58af70dc9267409f113bea772d57f675c (diff)
parent    719dbb2df78fc9a40e28392b07cd715bfc5a665c (diff)
Merge tag 'powerpc-4.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:

 "Highlights:
   - PowerNV PCI hotplug support.
   - Lots more Power9 support.
   - eBPF JIT support on ppc64le.
   - Lots of cxl updates.
   - Boot code consolidation.

  Bug fixes:
   - Fix spin_unlock_wait() from Boqun Feng
   - Fix stack pointer corruption in __tm_recheckpoint() from Michael Neuling
   - Fix multiple bugs in memory_hotplug_max() from Bharata B Rao
   - mm: Ensure "special" zones are empty from Oliver O'Halloran
   - ftrace: Separate the heuristics for checking call sites from Michael Ellerman
   - modules: Never restore r2 for a mprofile-kernel style mcount() call from Michael Ellerman
   - Fix endianness when reading TCEs from Alexey Kardashevskiy
   - start rtasd before PCI probing from Greg Kurz
   - PCI: rpaphp: Fix slot registration for multiple slots under a PHB from Tyrel Datwyler
   - powerpc/mm: Add memory barrier in __hugepte_alloc() from Sukadev Bhattiprolu

  Cleanups & fixes:
   - Drop support for MPIC in pseries from Rashmica Gupta
   - Define and use PPC64_ELF_ABI_v2/v1 from Michael Ellerman
   - Remove unused symbols in asm-offsets.c from Rashmica Gupta
   - Fix SRIOV not building without EEH enabled from Russell Currey
   - Remove kretprobe_trampoline_holder from Thiago Jung Bauermann
   - Reduce log level of PCI I/O space warning from Benjamin Herrenschmidt
   - Add array bounds checking to crash_shutdown_handlers from Suraj Jitindar Singh
   - Avoid -maltivec when using clang integrated assembler from Anton Blanchard
   - Fix array overrun in ppc_rtas() syscall from Andrew Donnellan
   - Fix error return value in cmm_mem_going_offline() from Rasmus Villemoes
   - export cpu_to_core_id() from Mauricio Faria de Oliveira
   - Remove old symbols from defconfigs from Andrew Donnellan
   - Update obsolete comments in setup_32.c about entry conditions from Benjamin Herrenschmidt
   - Add comment explaining the purpose of setup_kdump_trampoline() from Benjamin Herrenschmidt
   - Merge the RELOCATABLE config entries for ppc32 and ppc64 from Kevin Hao
   - Remove RELOCATABLE_PPC32 from Kevin Hao
   - Fix .long's in tlb-radix.c to more meaningful from Balbir Singh

  Minor cleanups & fixes:
   - Andrew Donnellan, Anna-Maria Gleixner, Anton Blanchard, Benjamin
     Herrenschmidt, Bharata B Rao, Christophe Leroy, Colin Ian King,
     Geliang Tang, Greg Kurz, Madhavan Srinivasan, Michael Ellerman,
     Stephen Rothwell, Stewart Smith.

  Freescale updates from Scott:
   - "Highlights include more 8xx optimizations, device tree updates,
     and MVME7100 support."

  PowerNV PCI hotplug from Gavin Shan:
   - PCI: Add pcibios_setup_bridge()
   - Override pcibios_setup_bridge()
   - Remove PCI_RESET_DELAY_US
   - Move pnv_pci_ioda_setup_opal_tce_kill() around
   - Increase PE# capacity
   - Allocate PE# in reverse order
   - Create PEs in pcibios_setup_bridge()
   - Setup PE for root bus
   - Extend PCI bridge resources
   - Make pnv_ioda_deconfigure_pe() visible
   - Dynamically release PE
   - Update bridge windows on PCI plug
   - Delay populating pdn
   - Support PCI slot ID
   - Use PCI slot reset infrastructure
   - Introduce pnv_pci_get_slot_id()
   - Functions to get/set PCI slot state
   - PCI/hotplug: PowerPC PowerNV PCI hotplug driver
   - Print correct PHB type names

  Power9 idle support from Shreyas B. Prabhu:
   - set power_save func after the idle states are initialized
   - Use PNV_THREAD_WINKLE macro while requesting for winkle
   - make hypervisor state restore a function
   - Rename idle_power7.S to idle_book3s.S
   - Rename reusable idle functions to hardware agnostic names
   - Make pnv_powersave_common more generic
   - abstraction for saving SPRs before entering deep idle states
   - Add platform support for stop instruction
   - cpuidle/powernv: Use CPUIDLE_STATE_MAX instead of MAX_POWERNV_IDLE_STATES
   - cpuidle/powernv: cleanup cpuidle-powernv.c
   - cpuidle/powernv: Add support for POWER ISA v3 idle states
   - Use deepest stop state when cpu is offlined

  Power9 PMU from Madhavan Srinivasan:
   - factor out power8 pmu macros and defines
   - factor out power8 pmu functions
   - factor out power8 __init_pmu code
   - Add power9 event list macros for generic and cache events
   - Power9 PMU support
   - Export Power9 generic and cache events to sysfs

  Power9 preliminary interrupt & PCI support from Benjamin Herrenschmidt:
   - Add XICS emulation APIs
   - Move a few exception common handlers to make room
   - Add support for HV virtualization interrupts
   - Add mechanism to force a replay of interrupts
   - Add ICP OPAL backend
   - Discover IODA3 PHBs
   - pci: Remove obsolete SW invalidate
   - opal: Add real mode call wrappers
   - Rename TCE invalidation calls
   - Remove SWINV constants and obsolete TCE code
   - Rework accessing the TCE invalidate register
   - Fallback to OPAL for TCE invalidations
   - Use the device-tree to get available range of M64's
   - Check status of a PHB before using it
   - pci: Don't try to allocate resources that will be reassigned

  Other Power9:
   - Send SIGBUS on unaligned copy and paste from Chris Smart
   - Large Decrementer support from Oliver O'Halloran
   - Load Monitor Register Support from Jack Miller

  Performance improvements from Anton Blanchard:
   - Avoid load hit store in __giveup_fpu() and __giveup_altivec()
   - Avoid load hit store in setup_sigcontext()
   - Remove assembly versions of strcpy, strcat, strlen and strcmp
   - Align hot loops of some string functions

  eBPF JIT from Naveen N. Rao:
   - Fix/enhance 32-bit Load Immediate implementation
   - Optimize 64-bit Immediate loads
   - Introduce rotate immediate instructions
   - A few cleanups
   - Isolate classic BPF JIT specifics into a separate header
   - Implement JIT compiler for extended BPF

  Operator Panel driver from Suraj Jitindar Singh:
   - devicetree/bindings: Add binding for operator panel on FSP machines
   - Add inline function to get rc from an ASYNC_COMP opal_msg
   - Add driver for operator panel on FSP machines

  Sparse fixes from Daniel Axtens:
   - make some things static
   - Introduce asm-prototypes.h
   - Include headers containing prototypes
   - Use #ifdef __BIG_ENDIAN__ #else for REG_BYTE
   - kvm: Clarify __user annotations
   - Pass endianness to sparse
   - Make ppc_md.{halt, restart} __noreturn

  MM fixes & cleanups from Aneesh Kumar K.V:
   - radix: Update LPCR HR bit as per ISA
   - use _raw variant of page table accessors
   - Compile out radix related functions if RADIX_MMU is disabled
   - Clear top 16 bits of va only on older cpus
   - Print information regarding the MMU mode
   - hash: Update SDR1 size encoding as documented in ISA 3.0
   - radix: Update PID switch sequence
   - radix: Update machine call back to support new HCALL
   - radix: Add LPID based tlb flush helpers
   - radix: Add a kernel command line to disable radix
   - Cleanup LPCR defines

  Boot code consolidation from Benjamin Herrenschmidt:
   - Move epapr_paravirt_early_init() to early_init_devtree()
   - cell: Don't use flat device-tree after boot
   - ge_imp3a: Don't use the flat device-tree after boot
   - mpc85xx_ds: Don't use the flat device-tree after boot
   - mpc85xx_rdb: Don't use the flat device-tree after boot
   - Don't test for machine type in rtas_initialize()
   - Don't test for machine type in smp_setup_cpu_maps()
   - dt: Add of_device_compatible_match()
   - Factor do_feature_fixup calls
   - Move 64-bit feature fixup earlier
   - Move 64-bit memory reserves to setup_arch()
   - Use a cachable DART
   - Move FW feature probing out of pseries probe()
   - Put exception configuration in a common place
   - Remove early allocation of the SMU command buffer
   - Move MMU backend selection out of platform code
   - pasemi: Remove IOBMAP allocation from platform probe()
   - mm/hash: Don't use machine_is() early during boot
   - Don't test for machine type to detect HEA special case
   - pmac: Remove spurious machine type test
   - Move hash table ops to a separate structure
   - Ensure that ppc_md is empty before probing for machine type
   - Move 64-bit probe_machine() to later in the boot process
   - Move 32-bit probe() machine to later in the boot process
   - Get rid of ppc_md.init_early()
   - Move the boot time info banner to a separate function
   - Move setting of {i,d}cache_bsize to initialize_cache_info()
   - Move the content of setup_system() to setup_arch()
   - Move cache info inits to a separate function
   - Re-order the call to smp_setup_cpu_maps()
   - Re-order setup_panic()
   - Make a few boot functions __init
   - Merge 32-bit and 64-bit setup_arch()

  Other new features:
   - tty/hvc: Use IRQF_SHARED for OPAL hvc consoles from Sam Mendoza-Jonas
   - tty/hvc: Use opal irqchip interface if available from Sam Mendoza-Jonas
   - powerpc: Add module autoloading based on CPU features from Alastair D'Silva
   - crypto: vmx - Convert to CPU feature based module autoloading from Alastair D'Silva
   - Wake up kopald polling thread before waiting for events from Benjamin Herrenschmidt
   - xmon: Dump ISA 2.06 SPRs from Michael Ellerman
   - xmon: Dump ISA 2.07 SPRs from Michael Ellerman
   - Add a parameter to disable 1TB segs from Oliver O'Halloran
   - powerpc/boot: Add OPAL console to epapr wrappers from Oliver O'Halloran
   - Assign fixed PHB number based on device-tree properties from Guilherme G. Piccoli
   - pseries: Add pseries hotplug workqueue from John Allen
   - pseries: Add support for hotplug interrupt source from John Allen
   - pseries: Use kernel hotplug queue for PowerVM hotplug events from John Allen
   - pseries: Move property cloning into its own routine from Nathan Fontenot
   - pseries: Dynamic add entries to associativity lookup array from Nathan Fontenot
   - pseries: Auto-online hotplugged memory from Nathan Fontenot
   - pseries: Remove call to memblock_add() from Nathan Fontenot

  cxl:
   - Add set and get private data to context struct from Michael Neuling
   - make base more explicitly non-modular from Paul Gortmaker
   - Use for_each_compatible_node() macro from Wei Yongjun
   - Frederic Barrat:
     - Abstract the differences between the PSL and XSL
     - Make vPHB device node match adapter's
   - Philippe Bergheaud:
     - Add mechanism for delivering AFU driver specific events
     - Ignore CAPI adapters misplaced in switched slots
     - Refine slice error debug messages
   - Andrew Donnellan:
     - static-ify variables to fix sparse warnings
     - PCI/hotplug: pnv_php: export symbols and move struct types needed by cxl
     - PCI/hotplug: pnv_php: handle OPAL_PCI_SLOT_OFFLINE power state
     - Add cxl_check_and_switch_mode() API to switch bi-modal cards
     - remove dead Kconfig options
     - fix potential NULL dereference in free_adapter()
   - Ian Munsie:
     - Update process element after allocating interrupts
     - Add support for CAPP DMA mode
     - Fix allowing bogus AFU descriptors with 0 maximum processes
     - Fix allocating a minimum of 2 pages for the SPA
     - Fix bug where AFU disable operation had no effect
     - Workaround XSL bug that does not clear the RA bit after a reset
     - Fix NULL pointer dereference on kernel contexts with no AFU interrupts
     - powerpc/powernv: Split cxl code out into a separate file
     - Add cxl_slot_is_supported API
     - Enable bus mastering for devices using CAPP DMA mode
     - Move cxl_afu_get / cxl_afu_put to base
     - Allow a default context to be associated with an external pci_dev
     - Do not create vPHB if there are no AFU configuration records
     - powerpc/powernv: Add support for the cxl kernel api on the real phb
     - Add support for using the kernel API with a real PHB
     - Add kernel APIs to get & set the max irqs per context
     - Add preliminary workaround for CX4 interrupt limitation
     - Add support for interrupts on the Mellanox CX4
     - Workaround PE=0 hardware limitation in Mellanox CX4
     - powerpc/powernv: Fix pci-cxl.c build when CONFIG_MODULES=n

  selftests:
   - Test unaligned copy and paste from Chris Smart
   - Load Monitor Register Tests from Jack Miller
   - Cyril Bur:
     - exec() with suspended transaction
     - Use signed long to read perf_event_paranoid
     - Fix usage message in context_switch
     - Fix generation of vector instructions/types in context_switch
   - Michael Ellerman:
     - Use "Delta" rather than "Error" in normal output
     - Import Anton's mmap & futex micro benchmarks
     - Add a test for PROT_SAO"

* tag 'powerpc-4.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (263 commits)
  powerpc/mm: Parenthesise IS_ENABLED() in if condition
  tty/hvc: Use opal irqchip interface if available
  tty/hvc: Use IRQF_SHARED for OPAL hvc consoles
  selftests/powerpc: exec() with suspended transaction
  powerpc: Improve comment explaining why we modify VRSAVE
  powerpc/mm: Drop unused externs for hpte_init_beat[_v3]()
  powerpc/mm: Rename hpte_init_lpar() and move the fallback to a header
  powerpc/mm: Fix build break when PPC_NATIVE=n
  crypto: vmx - Convert to CPU feature based module autoloading
  powerpc: Add module autoloading based on CPU features
  powerpc/powernv/ioda: Fix endianness when reading TCEs
  powerpc/mm: Add memory barrier in __hugepte_alloc()
  powerpc/modules: Never restore r2 for a mprofile-kernel style mcount() call
  powerpc/ftrace: Separate the heuristics for checking call sites
  powerpc: Merge 32-bit and 64-bit setup_arch()
  powerpc/64: Make a few boot functions __init
  powerpc: Re-order setup_panic()
  powerpc: Re-order the call to smp_setup_cpu_maps()
  powerpc/32: Move cache info inits to a separate function
  powerpc/64: Move the content of setup_system() to setup_arch()
  ...
Diffstat (limited to 'drivers/misc/cxl/native.c')
-rw-r--r--  drivers/misc/cxl/native.c | 207
1 file changed, 165 insertions(+), 42 deletions(-)
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 55d8a1459f28..3bcdaee11ba1 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -21,10 +21,10 @@
 #include "cxl.h"
 #include "trace.h"
 
-static int afu_control(struct cxl_afu *afu, u64 command,
+static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
                        u64 result, u64 mask, bool enabled)
 {
-        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+        u64 AFU_Cntl;
         unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
         int rc = 0;
 
@@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command,
 
         trace_cxl_afu_ctrl(afu, command);
 
-        cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);
+        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+        cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
 
         AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
         while ((AFU_Cntl & mask) != result) {
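Note: with the new `clear` argument, the control-register update above becomes a read-modify-write, and reading CXL_AFU_Cntl_An just before the write ensures it starts from the register's current state. A minimal sketch of the resulting semantics (the helper name is hypothetical, for illustration only):

    /* Hypothetical illustration: bits in 'clear' are dropped from the
     * current AFU_Cntl value, then bits in 'command' are set. */
    static u64 afu_cntl_next(u64 cur, u64 command, u64 clear)
    {
            return (cur & ~clear) | command;
    }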
@@ -54,6 +55,16 @@ static int afu_control(struct cxl_afu *afu, u64 command,
                 cpu_relax();
                 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
         };
+
+        if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
+                /*
+                 * Workaround for a bug in the XSL used in the Mellanox CX4
+                 * that fails to clear the RA bit after an AFU reset,
+                 * preventing subsequent AFU resets from working.
+                 */
+                cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
+        }
+
         pr_devel("AFU command complete: %llx\n", command);
         afu->enabled = enabled;
 out:
@@ -67,7 +78,7 @@ static int afu_enable(struct cxl_afu *afu)
 {
         pr_devel("AFU enable request\n");
 
-        return afu_control(afu, CXL_AFU_Cntl_An_E,
+        return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
                         CXL_AFU_Cntl_An_ES_Enabled,
                         CXL_AFU_Cntl_An_ES_MASK, true);
 }
@@ -76,7 +87,8 @@ int cxl_afu_disable(struct cxl_afu *afu)
 {
         pr_devel("AFU disable request\n");
 
-        return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
+        return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
+                        CXL_AFU_Cntl_An_ES_Disabled,
                         CXL_AFU_Cntl_An_ES_MASK, false);
 }
 
@@ -85,7 +97,7 @@ static int native_afu_reset(struct cxl_afu *afu)
 {
         pr_devel("AFU reset request\n");
 
-        return afu_control(afu, CXL_AFU_Cntl_An_RA,
+        return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
                 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
                 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
                 false);
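Note: the extra argument maps the three control paths onto (command, clear) pairs as follows:

    caller               command               clear                waits for
    afu_enable()         CXL_AFU_Cntl_An_E     0                    ES_Enabled
    cxl_afu_disable()    0                     CXL_AFU_Cntl_An_E    ES_Disabled
    native_afu_reset()   CXL_AFU_Cntl_An_RA    0                    RS_Complete | ES_Disabled

Previously the disable path wrote the enable bit back unchanged, so a disable request left the AFU enabled; clearing CXL_AFU_Cntl_An_E here is the "AFU disable operation had no effect" fix from the merge log.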
@@ -189,7 +201,7 @@ int cxl_alloc_spa(struct cxl_afu *afu)
         unsigned spa_size;
 
         /* Work out how many pages to allocate */
-        afu->native->spa_order = 0;
+        afu->native->spa_order = -1;
         do {
                 afu->native->spa_order++;
                 spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
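Note: starting spa_order at -1 lets the first pass of the sizing loop try order 0 (a single page); with the old initial value of 0, the smallest SPA ever allocated was two pages. A self-contained sketch of the loop's progression (required_size is illustrative, not the driver's actual bound):

    /* Try 1 page, then 2, 4, ... until the SPA fits. With the old
     * starting value the 1-page case was unreachable. */
    int order = -1;
    unsigned long spa_size;
    do {
            order++;
            spa_size = (1UL << order) * PAGE_SIZE;
    } while (spa_size < required_size);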
@@ -430,7 +442,6 @@ static int remove_process_element(struct cxl_context *ctx)
         return rc;
 }
 
-
 void cxl_assign_psn_space(struct cxl_context *ctx)
 {
         if (!ctx->afu->pp_size || ctx->master) {
@@ -507,10 +518,39 @@ static u64 calculate_sr(struct cxl_context *ctx)
         return sr;
 }
 
+static void update_ivtes_directed(struct cxl_context *ctx)
+{
+        bool need_update = (ctx->status == STARTED);
+        int r;
+
+        if (need_update) {
+                WARN_ON(terminate_process_element(ctx));
+                WARN_ON(remove_process_element(ctx));
+        }
+
+        for (r = 0; r < CXL_IRQ_RANGES; r++) {
+                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
+                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
+        }
+
+        /*
+         * Theoretically we could use the update llcmd, instead of a
+         * terminate/remove/add (or if an atomic update was required we could
+         * do a suspend/update/resume), however it seems there might be issues
+         * with the update llcmd on some cards (including those using an XSL on
+         * an ASIC) so for now it's safest to go with the commands that are
+         * known to work. In the future if we come across a situation where the
+         * card may be performing transactions using the same PE while we are
+         * doing this update we might need to revisit this.
+         */
+        if (need_update)
+                WARN_ON(add_process_element(ctx));
+}
+
 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
 {
         u32 pid;
-        int r, result;
+        int result;
 
         cxl_assign_psn_space(ctx);
 
@@ -545,10 +585,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
                 ctx->irqs.range[0] = 1;
         }
 
-        for (r = 0; r < CXL_IRQ_RANGES; r++) {
-                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
-                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
-        }
+        update_ivtes_directed(ctx);
 
         ctx->elem->common.amr = cpu_to_be64(amr);
         ctx->elem->common.wed = cpu_to_be64(wed);
@@ -570,7 +607,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
         cxl_sysfs_afu_m_remove(afu);
         cxl_chardev_afu_remove(afu);
 
-        cxl_ops->afu_reset(afu);
+        /*
+         * The CAIA section 2.2.1 indicates that the procedure for starting and
+         * stopping an AFU in AFU directed mode is AFU specific, which is not
+         * ideal since this code is generic and with one exception has no
+         * knowledge of the AFU. This is in contrast to the procedure for
+         * disabling a dedicated process AFU, which is documented to just
+         * require a reset. The architecture does indicate that both an AFU
+         * reset and an AFU disable should result in the AFU being disabled and
+         * we do both followed by a PSL purge for safety.
+         *
+         * Notably we used to have some issues with the disable sequence on PSL
+         * cards, which is why we ended up using this heavy weight procedure in
+         * the first place, however a bug was discovered that had rendered the
+         * disable operation ineffective, so it is conceivable that was the
+         * sole explanation for those difficulties. Careful regression testing
+         * is recommended if anyone attempts to remove or reorder these
+         * operations.
+         *
+         * The XSL on the Mellanox CX4 behaves a little differently from the
+         * PSL based cards and will time out an AFU reset if the AFU is still
+         * enabled. That card is special in that we do have a means to identify
+         * it from this code, so in that case we skip the reset and just use a
+         * disable/purge to avoid the timeout and corresponding noise in the
+         * kernel log.
+         */
+        if (afu->adapter->native->sl_ops->needs_reset_before_disable)
+                cxl_ops->afu_reset(afu);
         cxl_afu_disable(afu);
         cxl_psl_purge(afu);
 
@@ -600,6 +663,22 @@ static int activate_dedicated_process(struct cxl_afu *afu)
         return cxl_chardev_d_afu_add(afu);
 }
 
+static void update_ivtes_dedicated(struct cxl_context *ctx)
+{
+        struct cxl_afu *afu = ctx->afu;
+
+        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
+                       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
+                       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
+                       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
+                        ((u64)ctx->irqs.offset[3] & 0xffff));
+        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
+                       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
+                       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
+                       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
+                        ((u64)ctx->irqs.range[3] & 0xffff));
+}
+
 static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
 {
         struct cxl_afu *afu = ctx->afu;
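Note: update_ivtes_dedicated() packs four 16-bit IVTE fields into each 64-bit register, entry 0 in the most significant halfword. A minimal sketch of the packing (helper name hypothetical, for illustration only):

    /* Pack four 16-bit values into one 64-bit register image:
     * element 0 lands in bits 63:48, element 3 in bits 15:0. */
    static u64 pack_ivte(const u16 v[4])
    {
            return ((u64)v[0] << 48) | ((u64)v[1] << 32) |
                   ((u64)v[2] << 16) |  (u64)v[3];
    }

The & 0xffff masks in the driver code perform the same truncation on its wider offset/range fields.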
@@ -618,16 +697,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
 
         cxl_prefault(ctx, wed);
 
-        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
-                       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
-                       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
-                       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
-                        ((u64)ctx->irqs.offset[3] & 0xffff));
-        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
-                       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
-                       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
-                       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
-                        ((u64)ctx->irqs.range[3] & 0xffff));
+        update_ivtes_dedicated(ctx);
 
         cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
 
@@ -703,12 +773,37 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel,
 
 static inline int detach_process_native_dedicated(struct cxl_context *ctx)
 {
+        /*
+         * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
+         * stop the AFU in dedicated mode (we therefore do not make that
+         * optional like we do in the afu directed path). It does not indicate
+         * that we need to do an explicit disable (which should occur
+         * implicitly as part of the reset) or purge, but we do these as well
+         * to be on the safe side.
+         *
+         * Notably we used to have some issues with the disable sequence
+         * (before the sequence was spelled out in the architecture) which is
+         * why we were so heavy weight in the first place, however a bug was
+         * discovered that had rendered the disable operation ineffective, so
+         * it is conceivable that was the sole explanation for those
+         * difficulties. Point is, we should be careful and do some regression
+         * testing if we ever attempt to remove any part of this procedure.
+         */
         cxl_ops->afu_reset(ctx->afu);
         cxl_afu_disable(ctx->afu);
         cxl_psl_purge(ctx->afu);
         return 0;
 }
 
+static void native_update_ivtes(struct cxl_context *ctx)
+{
+        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+                return update_ivtes_directed(ctx);
+        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
+                return update_ivtes_dedicated(ctx);
+        WARN(1, "native_update_ivtes: Bad mode\n");
+}
+
 static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
 {
         if (!ctx->pe_inserted)
@@ -754,26 +849,38 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
         return 0;
 }
 
-static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
-                                                u64 dsisr, u64 errstat)
+void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
 {
         u64 fir1, fir2, fir_slice, serr, afu_debug;
 
         fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
         fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
         fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
-        serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
         afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
 
-        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
         dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
         dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
-        dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
+                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+                cxl_afu_decode_psl_serr(ctx->afu, serr);
+        }
         dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
         dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+}
+
+static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
+                                                u64 dsisr, u64 errstat)
+{
+
+        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
 
-        dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
-        cxl_stop_trace(ctx->afu->adapter);
+        if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
+                ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
+
+        if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
+                dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
+                ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
+        }
 
         return cxl_ops->ack_irq(ctx, 0, errstat);
 }
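Note: the error paths above now dispatch through per-service-layer callbacks, so full PSL cards and the XSL on the Mellanox CX4 can plug in different dump/trace hooks. A sketch of the fields this file relies on (the real struct is defined in cxl.h; the types are inferred from the call sites in this diff and may differ):

    struct cxl_service_layer_ops {
            int (*register_serr_irq)(struct cxl_afu *afu);   /* presence gates SERR decoding */
            void (*psl_irq_dump_registers)(struct cxl_context *ctx);
            void (*err_irq_dump_registers)(struct cxl *adapter);
            void (*debugfs_stop_trace)(struct cxl *adapter);
            bool needs_reset_before_disable;
    };

Callbacks left NULL are simply skipped, which is how an XSL card avoids the PSL-only register dumps and the trace-stop path.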
@@ -849,41 +956,56 @@ void native_irq_wait(struct cxl_context *ctx)
 static irqreturn_t native_slice_irq_err(int irq, void *data)
 {
         struct cxl_afu *afu = data;
-        u64 fir_slice, errstat, serr, afu_debug;
-
-        WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+        u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;
 
+        /*
+         * slice err interrupt is only used with full PSL (no XSL)
+         */
         serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
         fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
         errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
         afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
-        dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+        afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+        cxl_afu_decode_psl_serr(afu, serr);
         dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
         dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
         dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+        dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
+        dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
 
         cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
 
         return IRQ_HANDLED;
 }
 
+void cxl_native_err_irq_dump_regs(struct cxl *adapter)
+{
+        u64 fir1, fir2;
+
+        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
+        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+
+        dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+}
+
 static irqreturn_t native_irq_err(int irq, void *data)
 {
         struct cxl *adapter = data;
-        u64 fir1, fir2, err_ivte;
+        u64 err_ivte;
 
         WARN(1, "CXL ERROR interrupt %i\n", irq);
 
         err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
         dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
 
-        dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
-        cxl_stop_trace(adapter);
-
-        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
-        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+        if (adapter->native->sl_ops->debugfs_stop_trace) {
+                dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
+                adapter->native->sl_ops->debugfs_stop_trace(adapter);
+        }
 
-        dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+        if (adapter->native->sl_ops->err_irq_dump_registers)
+                adapter->native->sl_ops->err_irq_dump_registers(adapter);
 
         return IRQ_HANDLED;
 }
@@ -1128,6 +1250,7 @@ const struct cxl_backend_ops cxl_native_ops = {
         .irq_wait = native_irq_wait,
         .attach_process = native_attach_process,
         .detach_process = native_detach_process,
+        .update_ivtes = native_update_ivtes,
         .support_attributes = native_support_attributes,
         .link_ok = cxl_adapter_link_ok,
         .release_afu = cxl_pci_release_afu,
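Note: the new .update_ivtes backend op gives common code a way to repoint a context's interrupt vector table entries after its IRQ ranges change (added for the Mellanox CX4 interrupt work in this merge). A hedged usage sketch, not taken from this diff:

    /* Illustrative caller: after reassigning ctx->irqs, push the new
     * IVTEs to hardware through whichever backend is active. */
    if (cxl_ops->update_ivtes)
            cxl_ops->update_ivtes(ctx);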