author    | Christophe Lombard <clombard@linux.vnet.ibm.com> | 2017-04-12 10:34:07 -0400
committer | Michael Ellerman <mpe@ellerman.id.au>            | 2017-04-13 09:34:31 -0400
commit    | f24be42aab37c6d07c05126673138e06223a6399 (patch)
tree      | 778de3d0c4b2dd8d80be7edb262174d1755ee8d9
parent    | abd1d99bb3da42d6c7341c14986f5b8f4dcc6bd5 (diff)
cxl: Add psl9 specific code
The new Coherent Accelerator Interface Architecture, level 2, for the
IBM POWER9 brings new content and features:
- POWER9 Service Layer
- Registers
- Radix mode
- Process element entry
- Dedicated-Shared Process Programming Model
- Translation Fault Handling
- CAPP
- Memory Context ID
If a valid mm_struct is found, the memory context ID is used for each
transaction associated with the process handle. The PSL uses this
context ID to find the corresponding process element.
Signed-off-by: Christophe Lombard <clombard@linux.vnet.ibm.com>
Acked-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
[mpe: Fixup comment formatting, unsplit long strings]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r-- | Documentation/powerpc/cxl.txt |  15
-rw-r--r-- | drivers/misc/cxl/context.c    |  19
-rw-r--r-- | drivers/misc/cxl/cxl.h        | 138
-rw-r--r-- | drivers/misc/cxl/debugfs.c    |  19
-rw-r--r-- | drivers/misc/cxl/fault.c      |  64
-rw-r--r-- | drivers/misc/cxl/guest.c      |   8
-rw-r--r-- | drivers/misc/cxl/irq.c        |  51
-rw-r--r-- | drivers/misc/cxl/native.c     | 224
-rw-r--r-- | drivers/misc/cxl/pci.c        | 259
-rw-r--r-- | drivers/misc/cxl/trace.h      |  43
10 files changed, 764 insertions(+), 76 deletions(-)
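
For orientation, the sketch below condenses the attach flow the commit message describes for a PSL9 card. It is not part of the patch: the helper name example_set_pe_context_id() is invented, and the body is modeled on process_element_entry_psl9() in the native.c hunk further down. The key point is that a user context must supply a valid mm_struct, whose memory context ID (not the Linux PID) is written into the process element so the PSL can locate the PE for each transaction.

/*
 * Illustration only (hypothetical helper, condensed from
 * process_element_entry_psl9() later in this patch): a kernel context
 * uses PID 0; a user context must have a valid mm_struct, whose memory
 * context ID is stored in the process element so the PSL can find the
 * PE for each transaction associated with the process handle.
 */
static int example_set_pe_context_id(struct cxl_context *ctx)
{
	u32 pid = 0;				/* kernel contexts: no address space */

	if (!ctx->kernel) {
		if (ctx->mm == NULL)		/* no valid mm_struct: refuse to attach */
			return -EINVAL;
		pid = ctx->mm->context.id;	/* memory context ID, not the task PID */
	}

	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);
	return 0;
}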
diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.txt index d5506ba0fef7..c5e8d5098ed3 100644 --- a/Documentation/powerpc/cxl.txt +++ b/Documentation/powerpc/cxl.txt | |||
@@ -21,7 +21,7 @@ Introduction | |||
21 | Hardware overview | 21 | Hardware overview |
22 | ================= | 22 | ================= |
23 | 23 | ||
24 | POWER8 FPGA | 24 | POWER8/9 FPGA |
25 | +----------+ +---------+ | 25 | +----------+ +---------+ |
26 | | | | | | 26 | | | | | |
27 | | CPU | | AFU | | 27 | | CPU | | AFU | |
@@ -34,7 +34,7 @@ Hardware overview | |||
34 | | | CAPP |<------>| | | 34 | | | CAPP |<------>| | |
35 | +---+------+ PCIE +---------+ | 35 | +---+------+ PCIE +---------+ |
36 | 36 | ||
37 | The POWER8 chip has a Coherently Attached Processor Proxy (CAPP) | 37 | The POWER8/9 chip has a Coherently Attached Processor Proxy (CAPP) |
38 | unit which is part of the PCIe Host Bridge (PHB). This is managed | 38 | unit which is part of the PCIe Host Bridge (PHB). This is managed |
39 | by Linux by calls into OPAL. Linux doesn't directly program the | 39 | by Linux by calls into OPAL. Linux doesn't directly program the |
40 | CAPP. | 40 | CAPP. |
@@ -59,6 +59,17 @@ Hardware overview | |||
59 | the fault. The context to which this fault is serviced is based on | 59 | the fault. The context to which this fault is serviced is based on |
60 | who owns that acceleration function. | 60 | who owns that acceleration function. |
61 | 61 | ||
62 | POWER8 <-----> PSL Version 8 is compliant to the CAIA Version 1.0. | ||
63 | POWER9 <-----> PSL Version 9 is compliant to the CAIA Version 2.0. | ||
64 | This PSL Version 9 provides new features such as: | ||
65 | * Interaction with the nest MMU on the P9 chip. | ||
66 | * Native DMA support. | ||
67 | * Supports sending ASB_Notify messages for host thread wakeup. | ||
68 | * Supports Atomic operations. | ||
69 | * .... | ||
70 | |||
71 | Cards with a PSL9 won't work on a POWER8 system and cards with a | ||
72 | PSL8 won't work on a POWER9 system. | ||
62 | 73 | ||
63 | AFU Modes | 74 | AFU Modes |
64 | ========= | 75 | ========= |
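
The PSL8/PSL9 host incompatibility stated in the documentation above maps onto two checks added by this patch: the CAIA major version reported by the card (caia_major, tested by cxl_is_psl8()/cxl_is_psl9()) and the host CPU generation (cxl_is_power8()/cxl_is_power9()). The combined helper below is hypothetical, for illustration only.

/*
 * Illustration only: a hypothetical helper combining the checks this
 * patch adds. A PSL8 card (CAIA 1) is only usable on a POWER8 host,
 * and a PSL9 card (CAIA 2) only on a POWER9 host.
 */
static inline bool example_card_matches_host(struct cxl_afu *afu)
{
	if (cxl_is_psl8(afu))		/* afu->adapter->caia_major == 1 */
		return cxl_is_power8();
	if (cxl_is_psl9(afu))		/* afu->adapter->caia_major == 2 */
		return cxl_is_power9();
	return false;
}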
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index ac2531e4ad32..4472ce11f98d 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
@@ -188,13 +188,26 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) | |||
188 | if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { | 188 | if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { |
189 | if (start + len > ctx->afu->adapter->ps_size) | 189 | if (start + len > ctx->afu->adapter->ps_size) |
190 | return -EINVAL; | 190 | return -EINVAL; |
191 | |||
192 | if (cxl_is_psl9(ctx->afu)) { | ||
193 | /* | ||
194 | * Make sure there is a valid problem state | ||
195 | * area space for this AFU. | ||
196 | */ | ||
197 | if (ctx->master && !ctx->afu->psa) { | ||
198 | pr_devel("AFU doesn't support mmio space\n"); | ||
199 | return -EINVAL; | ||
200 | } | ||
201 | |||
202 | /* Can't mmap until the AFU is enabled */ | ||
203 | if (!ctx->afu->enabled) | ||
204 | return -EBUSY; | ||
205 | } | ||
191 | } else { | 206 | } else { |
192 | if (start + len > ctx->psn_size) | 207 | if (start + len > ctx->psn_size) |
193 | return -EINVAL; | 208 | return -EINVAL; |
194 | } | ||
195 | 209 | ||
196 | if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { | 210 | /* Make sure there is a valid per process space for this AFU */ |
197 | /* make sure there is a valid per process space for this AFU */ | ||
198 | if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { | 211 | if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { |
199 | pr_devel("AFU doesn't support mmio space\n"); | 212 | pr_devel("AFU doesn't support mmio space\n"); |
200 | return -EINVAL; | 213 | return -EINVAL; |
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 82335c0f7ac9..452e209c5e67 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
@@ -63,7 +63,7 @@ typedef struct { | |||
63 | /* Memory maps. Ref CXL Appendix A */ | 63 | /* Memory maps. Ref CXL Appendix A */ |
64 | 64 | ||
65 | /* PSL Privilege 1 Memory Map */ | 65 | /* PSL Privilege 1 Memory Map */ |
66 | /* Configuration and Control area */ | 66 | /* Configuration and Control area - CAIA 1&2 */ |
67 | static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000}; | 67 | static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000}; |
68 | static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008}; | 68 | static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008}; |
69 | static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010}; | 69 | static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010}; |
@@ -98,11 +98,29 @@ static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100}; | |||
98 | static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108}; | 98 | static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108}; |
99 | static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158}; | 99 | static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158}; |
100 | static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168}; | 100 | static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168}; |
101 | /* PSL registers - CAIA 2 */ | ||
102 | static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020}; | ||
103 | static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168}; | ||
104 | static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300}; | ||
105 | static const cxl_p1_reg_t CXL_PSL9_FIR2 = {0x0308}; | ||
106 | static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310}; | ||
107 | static const cxl_p1_reg_t CXL_PSL9_DEBUG = {0x0320}; | ||
108 | static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348}; | ||
109 | static const cxl_p1_reg_t CXL_PSL9_DSNDCTL = {0x0350}; | ||
110 | static const cxl_p1_reg_t CXL_PSL9_TB_CTLSTAT = {0x0340}; | ||
111 | static const cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368}; | ||
112 | static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378}; | ||
113 | static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380}; | ||
114 | static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388}; | ||
115 | static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398}; | ||
116 | static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588}; | ||
117 | static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590}; | ||
118 | |||
101 | /* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */ | 119 | /* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */ |
102 | /* 0x8000:FFFF Reserved PCIe MSI-X Table Area */ | 120 | /* 0x8000:FFFF Reserved PCIe MSI-X Table Area */ |
103 | 121 | ||
104 | /* PSL Slice Privilege 1 Memory Map */ | 122 | /* PSL Slice Privilege 1 Memory Map */ |
105 | /* Configuration Area */ | 123 | /* Configuration Area - CAIA 1&2 */ |
106 | static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00}; | 124 | static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00}; |
107 | static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08}; | 125 | static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08}; |
108 | static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10}; | 126 | static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10}; |
@@ -111,17 +129,18 @@ static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20}; | |||
111 | static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28}; | 129 | static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28}; |
112 | /* Memory Management and Lookaside Buffer Management - CAIA 1*/ | 130 | /* Memory Management and Lookaside Buffer Management - CAIA 1*/ |
113 | static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30}; | 131 | static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30}; |
132 | /* Memory Management and Lookaside Buffer Management - CAIA 1&2 */ | ||
114 | static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38}; | 133 | static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38}; |
115 | /* Pointer Area */ | 134 | /* Pointer Area - CAIA 1&2 */ |
116 | static const cxl_p1n_reg_t CXL_HAURP_An = {0x80}; | 135 | static const cxl_p1n_reg_t CXL_HAURP_An = {0x80}; |
117 | static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88}; | 136 | static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88}; |
118 | static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90}; | 137 | static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90}; |
119 | /* Control Area */ | 138 | /* Control Area - CAIA 1&2 */ |
120 | static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0}; | 139 | static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0}; |
121 | static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8}; | 140 | static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8}; |
122 | static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0}; | 141 | static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0}; |
123 | static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8}; | 142 | static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8}; |
124 | /* 0xC0:FF Implementation Dependent Area */ | 143 | /* 0xC0:FF Implementation Dependent Area - CAIA 1&2 */ |
125 | static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0}; | 144 | static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0}; |
126 | static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8}; | 145 | static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8}; |
127 | /* 0xC0:FF Implementation Dependent Area - CAIA 1 */ | 146 | /* 0xC0:FF Implementation Dependent Area - CAIA 1 */ |
@@ -131,7 +150,7 @@ static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0}; | |||
131 | static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8}; | 150 | static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8}; |
132 | 151 | ||
133 | /* PSL Slice Privilege 2 Memory Map */ | 152 | /* PSL Slice Privilege 2 Memory Map */ |
134 | /* Configuration and Control Area */ | 153 | /* Configuration and Control Area - CAIA 1&2 */ |
135 | static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000}; | 154 | static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000}; |
136 | static const cxl_p2n_reg_t CXL_CSRP_An = {0x008}; | 155 | static const cxl_p2n_reg_t CXL_CSRP_An = {0x008}; |
137 | /* Configuration and Control Area - CAIA 1 */ | 156 | /* Configuration and Control Area - CAIA 1 */ |
@@ -145,17 +164,17 @@ static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030}; | |||
145 | static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040}; | 164 | static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040}; |
146 | static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048}; | 165 | static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048}; |
147 | static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050}; | 166 | static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050}; |
148 | /* Interrupt Registers */ | 167 | /* Interrupt Registers - CAIA 1&2 */ |
149 | static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060}; | 168 | static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060}; |
150 | static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068}; | 169 | static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068}; |
151 | static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070}; | 170 | static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070}; |
152 | static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078}; | 171 | static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078}; |
153 | static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080}; | 172 | static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080}; |
154 | static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088}; | 173 | static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088}; |
155 | /* AFU Registers */ | 174 | /* AFU Registers - CAIA 1&2 */ |
156 | static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090}; | 175 | static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090}; |
157 | static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098}; | 176 | static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098}; |
158 | /* Work Element Descriptor */ | 177 | /* Work Element Descriptor - CAIA 1&2 */ |
159 | static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; | 178 | static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; |
160 | /* 0x0C0:FFF Implementation Dependent Area */ | 179 | /* 0x0C0:FFF Implementation Dependent Area */ |
161 | 180 | ||
@@ -182,6 +201,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; | |||
182 | #define CXL_PSL_SR_An_SF MSR_SF /* 64bit */ | 201 | #define CXL_PSL_SR_An_SF MSR_SF /* 64bit */ |
183 | #define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */ | 202 | #define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */ |
184 | #define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */ | 203 | #define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */ |
204 | #define CXL_PSL_SR_An_XLAT_hpt (0ull << (63-6))/* Hashed page table (HPT) mode */ | ||
205 | #define CXL_PSL_SR_An_XLAT_roh (2ull << (63-6))/* Radix on HPT mode */ | ||
206 | #define CXL_PSL_SR_An_XLAT_ror (3ull << (63-6))/* Radix on Radix mode */ | ||
207 | #define CXL_PSL_SR_An_BOT (1ull << (63-10)) /* Use the in-memory segment table */ | ||
185 | #define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */ | 208 | #define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */ |
186 | #define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */ | 209 | #define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */ |
187 | #define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */ | 210 | #define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */ |
@@ -298,12 +321,39 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; | |||
298 | #define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */ | 321 | #define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */ |
299 | #define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */ | 322 | #define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */ |
300 | 323 | ||
324 | /****** CXL_PSL_DSISR_An - CAIA 2 ****************************************************/ | ||
325 | #define CXL_PSL9_DSISR_An_TF (1ull << (63-3)) /* Translation fault */ | ||
326 | #define CXL_PSL9_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ | ||
327 | #define CXL_PSL9_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ | ||
328 | #define CXL_PSL9_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ | ||
329 | #define CXL_PSL9_DSISR_An_S (1ull << (63-38)) /* TF for a write operation */ | ||
330 | #define CXL_PSL9_DSISR_PENDING (CXL_PSL9_DSISR_An_TF | CXL_PSL9_DSISR_An_PE | CXL_PSL9_DSISR_An_AE | CXL_PSL9_DSISR_An_OC) | ||
331 | /* | ||
332 | * NOTE: Bits 56:63 (Checkout Response Status) are valid when DSISR_An[TF] = 1 | ||
333 | * Status (0:7) Encoding | ||
334 | */ | ||
335 | #define CXL_PSL9_DSISR_An_CO_MASK 0x00000000000000ffULL | ||
336 | #define CXL_PSL9_DSISR_An_SF 0x0000000000000080ULL /* Segment Fault 0b10000000 */ | ||
337 | #define CXL_PSL9_DSISR_An_PF_SLR 0x0000000000000088ULL /* PTE not found (Single Level Radix) 0b10001000 */ | ||
338 | #define CXL_PSL9_DSISR_An_PF_RGC 0x000000000000008CULL /* PTE not found (Radix Guest (child)) 0b10001100 */ | ||
339 | #define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */ | ||
340 | #define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */ | ||
341 | #define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */ | ||
342 | |||
301 | /****** CXL_PSL_TFC_An ******************************************************/ | 343 | /****** CXL_PSL_TFC_An ******************************************************/ |
302 | #define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ | 344 | #define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ |
303 | #define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */ | 345 | #define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */ |
304 | #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ | 346 | #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ |
305 | #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ | 347 | #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ |
306 | 348 | ||
349 | /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/ | ||
350 | #define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */ | ||
351 | #define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */ | ||
352 | #define CXL_XSL9_IERAT_PRS (1ull << (63-4)) /* PRS bit for Radix invalidations */ | ||
353 | #define CXL_XSL9_IERAT_INVR (1ull << (63-3)) /* Invalidate Radix */ | ||
354 | #define CXL_XSL9_IERAT_IALL (1ull << (63-8)) /* Invalidate All */ | ||
355 | #define CXL_XSL9_IERAT_IINPROG (1ull << (63-63)) /* Invalidate in progress */ | ||
356 | |||
307 | /* cxl_process_element->software_status */ | 357 | /* cxl_process_element->software_status */ |
308 | #define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */ | 358 | #define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */ |
309 | #define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */ | 359 | #define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */ |
@@ -654,25 +704,38 @@ int cxl_pci_reset(struct cxl *adapter); | |||
654 | void cxl_pci_release_afu(struct device *dev); | 704 | void cxl_pci_release_afu(struct device *dev); |
655 | ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); | 705 | ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len); |
656 | 706 | ||
657 | /* common == phyp + powernv */ | 707 | /* common == phyp + powernv - CAIA 1&2 */ |
658 | struct cxl_process_element_common { | 708 | struct cxl_process_element_common { |
659 | __be32 tid; | 709 | __be32 tid; |
660 | __be32 pid; | 710 | __be32 pid; |
661 | __be64 csrp; | 711 | __be64 csrp; |
662 | __be64 aurp0; | 712 | union { |
663 | __be64 aurp1; | 713 | struct { |
664 | __be64 sstp0; | 714 | __be64 aurp0; |
665 | __be64 sstp1; | 715 | __be64 aurp1; |
716 | __be64 sstp0; | ||
717 | __be64 sstp1; | ||
718 | } psl8; /* CAIA 1 */ | ||
719 | struct { | ||
720 | u8 reserved2[8]; | ||
721 | u8 reserved3[8]; | ||
722 | u8 reserved4[8]; | ||
723 | u8 reserved5[8]; | ||
724 | } psl9; /* CAIA 2 */ | ||
725 | } u; | ||
666 | __be64 amr; | 726 | __be64 amr; |
667 | u8 reserved3[4]; | 727 | u8 reserved6[4]; |
668 | __be64 wed; | 728 | __be64 wed; |
669 | } __packed; | 729 | } __packed; |
670 | 730 | ||
671 | /* just powernv */ | 731 | /* just powernv - CAIA 1&2 */ |
672 | struct cxl_process_element { | 732 | struct cxl_process_element { |
673 | __be64 sr; | 733 | __be64 sr; |
674 | __be64 SPOffset; | 734 | __be64 SPOffset; |
675 | __be64 sdr; | 735 | union { |
736 | __be64 sdr; /* CAIA 1 */ | ||
737 | u8 reserved1[8]; /* CAIA 2 */ | ||
738 | } u; | ||
676 | __be64 haurp; | 739 | __be64 haurp; |
677 | __be32 ctxtime; | 740 | __be32 ctxtime; |
678 | __be16 ivte_offsets[4]; | 741 | __be16 ivte_offsets[4]; |
@@ -761,6 +824,16 @@ static inline bool cxl_is_power8(void) | |||
761 | return false; | 824 | return false; |
762 | } | 825 | } |
763 | 826 | ||
827 | static inline bool cxl_is_power9(void) | ||
828 | { | ||
829 | /* intermediate solution */ | ||
830 | if (!cxl_is_power8() && | ||
831 | (cpu_has_feature(CPU_FTRS_POWER9) || | ||
832 | cpu_has_feature(CPU_FTR_POWER9_DD1))) | ||
833 | return true; | ||
834 | return false; | ||
835 | } | ||
836 | |||
764 | static inline bool cxl_is_psl8(struct cxl_afu *afu) | 837 | static inline bool cxl_is_psl8(struct cxl_afu *afu) |
765 | { | 838 | { |
766 | if (afu->adapter->caia_major == 1) | 839 | if (afu->adapter->caia_major == 1) |
@@ -768,6 +841,13 @@ static inline bool cxl_is_psl8(struct cxl_afu *afu) | |||
768 | return false; | 841 | return false; |
769 | } | 842 | } |
770 | 843 | ||
844 | static inline bool cxl_is_psl9(struct cxl_afu *afu) | ||
845 | { | ||
846 | if (afu->adapter->caia_major == 2) | ||
847 | return true; | ||
848 | return false; | ||
849 | } | ||
850 | |||
771 | ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, | 851 | ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf, |
772 | loff_t off, size_t count); | 852 | loff_t off, size_t count); |
773 | 853 | ||
@@ -794,7 +874,6 @@ int cxl_update_properties(struct device_node *dn, struct property *new_prop); | |||
794 | 874 | ||
795 | void cxl_remove_adapter_nr(struct cxl *adapter); | 875 | void cxl_remove_adapter_nr(struct cxl *adapter); |
796 | 876 | ||
797 | int cxl_alloc_spa(struct cxl_afu *afu); | ||
798 | void cxl_release_spa(struct cxl_afu *afu); | 877 | void cxl_release_spa(struct cxl_afu *afu); |
799 | 878 | ||
800 | dev_t cxl_get_dev(void); | 879 | dev_t cxl_get_dev(void); |
@@ -832,9 +911,13 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count); | |||
832 | void afu_release_irqs(struct cxl_context *ctx, void *cookie); | 911 | void afu_release_irqs(struct cxl_context *ctx, void *cookie); |
833 | void afu_irq_name_free(struct cxl_context *ctx); | 912 | void afu_irq_name_free(struct cxl_context *ctx); |
834 | 913 | ||
914 | int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr); | ||
835 | int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr); | 915 | int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr); |
916 | int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu); | ||
836 | int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu); | 917 | int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu); |
918 | int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr); | ||
837 | int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr); | 919 | int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr); |
920 | void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx); | ||
838 | void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx); | 921 | void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx); |
839 | 922 | ||
840 | #ifdef CONFIG_DEBUG_FS | 923 | #ifdef CONFIG_DEBUG_FS |
@@ -845,9 +928,12 @@ int cxl_debugfs_adapter_add(struct cxl *adapter); | |||
845 | void cxl_debugfs_adapter_remove(struct cxl *adapter); | 928 | void cxl_debugfs_adapter_remove(struct cxl *adapter); |
846 | int cxl_debugfs_afu_add(struct cxl_afu *afu); | 929 | int cxl_debugfs_afu_add(struct cxl_afu *afu); |
847 | void cxl_debugfs_afu_remove(struct cxl_afu *afu); | 930 | void cxl_debugfs_afu_remove(struct cxl_afu *afu); |
931 | void cxl_stop_trace_psl9(struct cxl *cxl); | ||
848 | void cxl_stop_trace_psl8(struct cxl *cxl); | 932 | void cxl_stop_trace_psl8(struct cxl *cxl); |
933 | void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir); | ||
849 | void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir); | 934 | void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir); |
850 | void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir); | 935 | void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir); |
936 | void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir); | ||
851 | void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir); | 937 | void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir); |
852 | 938 | ||
853 | #else /* CONFIG_DEBUG_FS */ | 939 | #else /* CONFIG_DEBUG_FS */ |
@@ -879,10 +965,19 @@ static inline void cxl_debugfs_afu_remove(struct cxl_afu *afu) | |||
879 | { | 965 | { |
880 | } | 966 | } |
881 | 967 | ||
968 | static inline void cxl_stop_trace_psl9(struct cxl *cxl) | ||
969 | { | ||
970 | } | ||
971 | |||
882 | static inline void cxl_stop_trace_psl8(struct cxl *cxl) | 972 | static inline void cxl_stop_trace_psl8(struct cxl *cxl) |
883 | { | 973 | { |
884 | } | 974 | } |
885 | 975 | ||
976 | static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, | ||
977 | struct dentry *dir) | ||
978 | { | ||
979 | } | ||
980 | |||
886 | static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, | 981 | static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, |
887 | struct dentry *dir) | 982 | struct dentry *dir) |
888 | { | 983 | { |
@@ -893,6 +988,10 @@ static inline void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, | |||
893 | { | 988 | { |
894 | } | 989 | } |
895 | 990 | ||
991 | static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir) | ||
992 | { | ||
993 | } | ||
994 | |||
896 | static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) | 995 | static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) |
897 | { | 996 | { |
898 | } | 997 | } |
@@ -938,7 +1037,9 @@ struct cxl_irq_info { | |||
938 | }; | 1037 | }; |
939 | 1038 | ||
940 | void cxl_assign_psn_space(struct cxl_context *ctx); | 1039 | void cxl_assign_psn_space(struct cxl_context *ctx); |
1040 | int cxl_invalidate_all_psl9(struct cxl *adapter); | ||
941 | int cxl_invalidate_all_psl8(struct cxl *adapter); | 1041 | int cxl_invalidate_all_psl8(struct cxl *adapter); |
1042 | irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); | ||
942 | irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); | 1043 | irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info); |
943 | irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info); | 1044 | irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info); |
944 | int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler, | 1045 | int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler, |
@@ -951,6 +1052,7 @@ int cxl_data_cache_flush(struct cxl *adapter); | |||
951 | int cxl_afu_disable(struct cxl_afu *afu); | 1052 | int cxl_afu_disable(struct cxl_afu *afu); |
952 | int cxl_psl_purge(struct cxl_afu *afu); | 1053 | int cxl_psl_purge(struct cxl_afu *afu); |
953 | 1054 | ||
1055 | void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx); | ||
954 | void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx); | 1056 | void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx); |
955 | void cxl_native_err_irq_dump_regs(struct cxl *adapter); | 1057 | void cxl_native_err_irq_dump_regs(struct cxl *adapter); |
956 | int cxl_pci_vphb_add(struct cxl_afu *afu); | 1058 | int cxl_pci_vphb_add(struct cxl_afu *afu); |
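
As a usage note for the CAIA 2 DSISR definitions added above: the checkout-response status in bits 56:63 is only meaningful when CXL_PSL9_DSISR_An_TF is set, and the "PTE not found" codes are what the fault path (cxl_is_page_fault() in the fault.c hunk below) treats as page faults. The decoder below is a hypothetical, condensed illustration; the patch itself uses a mask-based test.

/*
 * Illustration only: decoding the CAIA 2 checkout-response status.
 * Bits 56:63 are valid only when the translation fault bit
 * (CXL_PSL9_DSISR_An_TF) is set; any of the "PTE not found" encodings
 * means the fault is handled as a page fault.
 */
static bool example_psl9_is_page_fault(u64 dsisr)
{
	u64 status = dsisr & CXL_PSL9_DSISR_An_CO_MASK;

	if (!(dsisr & CXL_PSL9_DSISR_An_TF))
		return false;

	return status == CXL_PSL9_DSISR_An_PF_SLR ||
	       status == CXL_PSL9_DSISR_An_PF_RGC ||
	       status == CXL_PSL9_DSISR_An_PF_RGP ||
	       status == CXL_PSL9_DSISR_An_PF_HRH ||
	       status == CXL_PSL9_DSISR_An_PF_STEG;
}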
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c index 43a1a27c54d1..eae9d749f967 100644 --- a/drivers/misc/cxl/debugfs.c +++ b/drivers/misc/cxl/debugfs.c | |||
@@ -15,6 +15,12 @@ | |||
15 | 15 | ||
16 | static struct dentry *cxl_debugfs; | 16 | static struct dentry *cxl_debugfs; |
17 | 17 | ||
18 | void cxl_stop_trace_psl9(struct cxl *adapter) | ||
19 | { | ||
20 | /* Stop the trace */ | ||
21 | cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x4480000000000000ULL); | ||
22 | } | ||
23 | |||
18 | void cxl_stop_trace_psl8(struct cxl *adapter) | 24 | void cxl_stop_trace_psl8(struct cxl *adapter) |
19 | { | 25 | { |
20 | int slice; | 26 | int slice; |
@@ -53,6 +59,14 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode, | |||
53 | (void __force *)value, &fops_io_x64); | 59 | (void __force *)value, &fops_io_x64); |
54 | } | 60 | } |
55 | 61 | ||
62 | void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir) | ||
63 | { | ||
64 | debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1)); | ||
65 | debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR2)); | ||
66 | debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL)); | ||
67 | debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG)); | ||
68 | } | ||
69 | |||
56 | void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir) | 70 | void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir) |
57 | { | 71 | { |
58 | debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); | 72 | debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); |
@@ -92,6 +106,11 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter) | |||
92 | debugfs_remove_recursive(adapter->debugfs); | 106 | debugfs_remove_recursive(adapter->debugfs); |
93 | } | 107 | } |
94 | 108 | ||
109 | void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir) | ||
110 | { | ||
111 | debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); | ||
112 | } | ||
113 | |||
95 | void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) | 114 | void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) |
96 | { | 115 | { |
97 | debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); | 116 | debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); |
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c index e6f8f05446be..5344448f514e 100644 --- a/drivers/misc/cxl/fault.c +++ b/drivers/misc/cxl/fault.c | |||
@@ -146,25 +146,26 @@ static void cxl_handle_page_fault(struct cxl_context *ctx, | |||
146 | return cxl_ack_ae(ctx); | 146 | return cxl_ack_ae(ctx); |
147 | } | 147 | } |
148 | 148 | ||
149 | /* | 149 | if (!radix_enabled()) { |
150 | * update_mmu_cache() will not have loaded the hash since current->trap | 150 | /* |
151 | * is not a 0x400 or 0x300, so just call hash_page_mm() here. | 151 | * update_mmu_cache() will not have loaded the hash since current->trap |
152 | */ | 152 | * is not a 0x400 or 0x300, so just call hash_page_mm() here. |
153 | access = _PAGE_PRESENT | _PAGE_READ; | 153 | */ |
154 | if (dsisr & CXL_PSL_DSISR_An_S) | 154 | access = _PAGE_PRESENT | _PAGE_READ; |
155 | access |= _PAGE_WRITE; | 155 | if (dsisr & CXL_PSL_DSISR_An_S) |
156 | 156 | access |= _PAGE_WRITE; | |
157 | access |= _PAGE_PRIVILEGED; | 157 | |
158 | if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID)) | 158 | access |= _PAGE_PRIVILEGED; |
159 | access &= ~_PAGE_PRIVILEGED; | 159 | if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID)) |
160 | 160 | access &= ~_PAGE_PRIVILEGED; | |
161 | if (dsisr & DSISR_NOHPTE) | 161 | |
162 | inv_flags |= HPTE_NOHPTE_UPDATE; | 162 | if (dsisr & DSISR_NOHPTE) |
163 | 163 | inv_flags |= HPTE_NOHPTE_UPDATE; | |
164 | local_irq_save(flags); | 164 | |
165 | hash_page_mm(mm, dar, access, 0x300, inv_flags); | 165 | local_irq_save(flags); |
166 | local_irq_restore(flags); | 166 | hash_page_mm(mm, dar, access, 0x300, inv_flags); |
167 | 167 | local_irq_restore(flags); | |
168 | } | ||
168 | pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); | 169 | pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); |
169 | cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); | 170 | cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); |
170 | } | 171 | } |
@@ -184,7 +185,28 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx) | |||
184 | return ctx->mm; | 185 | return ctx->mm; |
185 | } | 186 | } |
186 | 187 | ||
188 | static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) | ||
189 | { | ||
190 | if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS)) | ||
191 | return true; | ||
192 | |||
193 | return false; | ||
194 | } | ||
195 | |||
196 | static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) | ||
197 | { | ||
198 | if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM)) | ||
199 | return true; | ||
200 | |||
201 | if ((cxl_is_psl9(ctx->afu)) && | ||
202 | ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) & | ||
203 | (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC | | ||
204 | CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH | | ||
205 | CXL_PSL9_DSISR_An_PF_STEG))) | ||
206 | return true; | ||
187 | 207 | ||
208 | return false; | ||
209 | } | ||
188 | 210 | ||
189 | void cxl_handle_fault(struct work_struct *fault_work) | 211 | void cxl_handle_fault(struct work_struct *fault_work) |
190 | { | 212 | { |
@@ -230,9 +252,9 @@ void cxl_handle_fault(struct work_struct *fault_work) | |||
230 | } | 252 | } |
231 | } | 253 | } |
232 | 254 | ||
233 | if (dsisr & CXL_PSL_DSISR_An_DS) | 255 | if (cxl_is_segment_miss(ctx, dsisr)) |
234 | cxl_handle_segment_miss(ctx, mm, dar); | 256 | cxl_handle_segment_miss(ctx, mm, dar); |
235 | else if (dsisr & CXL_PSL_DSISR_An_DM) | 257 | else if (cxl_is_page_fault(ctx, dsisr)) |
236 | cxl_handle_page_fault(ctx, mm, dsisr, dar); | 258 | cxl_handle_page_fault(ctx, mm, dsisr, dar); |
237 | else | 259 | else |
238 | WARN(1, "cxl_handle_fault has nothing to handle\n"); | 260 | WARN(1, "cxl_handle_fault has nothing to handle\n"); |
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 3ad73817b566..f58b4b6c79f2 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c | |||
@@ -551,13 +551,13 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) | |||
551 | elem->common.tid = cpu_to_be32(0); /* Unused */ | 551 | elem->common.tid = cpu_to_be32(0); /* Unused */ |
552 | elem->common.pid = cpu_to_be32(pid); | 552 | elem->common.pid = cpu_to_be32(pid); |
553 | elem->common.csrp = cpu_to_be64(0); /* disable */ | 553 | elem->common.csrp = cpu_to_be64(0); /* disable */ |
554 | elem->common.aurp0 = cpu_to_be64(0); /* disable */ | 554 | elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */ |
555 | elem->common.aurp1 = cpu_to_be64(0); /* disable */ | 555 | elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */ |
556 | 556 | ||
557 | cxl_prefault(ctx, wed); | 557 | cxl_prefault(ctx, wed); |
558 | 558 | ||
559 | elem->common.sstp0 = cpu_to_be64(ctx->sstp0); | 559 | elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); |
560 | elem->common.sstp1 = cpu_to_be64(ctx->sstp1); | 560 | elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); |
561 | 561 | ||
562 | /* | 562 | /* |
563 | * Ensure we have at least one interrupt allocated to take faults for | 563 | * Ensure we have at least one interrupt allocated to take faults for |
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index fa9f8a2543a1..ce08a9f22308 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c | |||
@@ -34,6 +34,57 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da | |||
34 | return IRQ_HANDLED; | 34 | return IRQ_HANDLED; |
35 | } | 35 | } |
36 | 36 | ||
37 | irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) | ||
38 | { | ||
39 | u64 dsisr, dar; | ||
40 | |||
41 | dsisr = irq_info->dsisr; | ||
42 | dar = irq_info->dar; | ||
43 | |||
44 | trace_cxl_psl9_irq(ctx, irq, dsisr, dar); | ||
45 | |||
46 | pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); | ||
47 | |||
48 | if (dsisr & CXL_PSL9_DSISR_An_TF) { | ||
49 | pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe); | ||
50 | return schedule_cxl_fault(ctx, dsisr, dar); | ||
51 | } | ||
52 | |||
53 | if (dsisr & CXL_PSL9_DSISR_An_PE) | ||
54 | return cxl_ops->handle_psl_slice_error(ctx, dsisr, | ||
55 | irq_info->errstat); | ||
56 | if (dsisr & CXL_PSL9_DSISR_An_AE) { | ||
57 | pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err); | ||
58 | |||
59 | if (ctx->pending_afu_err) { | ||
60 | /* | ||
61 | * This shouldn't happen - the PSL treats these errors | ||
62 | * as fatal and will have reset the AFU, so there's not | ||
63 | * much point buffering multiple AFU errors. | ||
64 | * OTOH if we DO ever see a storm of these come in it's | ||
65 | * probably best that we log them somewhere: | ||
66 | */ | ||
67 | dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n", | ||
68 | ctx->pe, irq_info->afu_err); | ||
69 | } else { | ||
70 | spin_lock(&ctx->lock); | ||
71 | ctx->afu_err = irq_info->afu_err; | ||
72 | ctx->pending_afu_err = 1; | ||
73 | spin_unlock(&ctx->lock); | ||
74 | |||
75 | wake_up_all(&ctx->wq); | ||
76 | } | ||
77 | |||
78 | cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0); | ||
79 | return IRQ_HANDLED; | ||
80 | } | ||
81 | if (dsisr & CXL_PSL9_DSISR_An_OC) | ||
82 | pr_devel("CXL interrupt: OS Context Warning\n"); | ||
83 | |||
84 | WARN(1, "Unhandled CXL PSL IRQ\n"); | ||
85 | return IRQ_HANDLED; | ||
86 | } | ||
87 | |||
37 | irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) | 88 | irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) |
38 | { | 89 | { |
39 | u64 dsisr, dar; | 90 | u64 dsisr, dar; |
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 193769f56075..194c58ec83b8 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c | |||
@@ -120,6 +120,7 @@ int cxl_psl_purge(struct cxl_afu *afu) | |||
120 | u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | 120 | u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); |
121 | u64 dsisr, dar; | 121 | u64 dsisr, dar; |
122 | u64 start, end; | 122 | u64 start, end; |
123 | u64 trans_fault = 0x0ULL; | ||
123 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | 124 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); |
124 | int rc = 0; | 125 | int rc = 0; |
125 | 126 | ||
@@ -127,6 +128,11 @@ int cxl_psl_purge(struct cxl_afu *afu) | |||
127 | 128 | ||
128 | pr_devel("PSL purge request\n"); | 129 | pr_devel("PSL purge request\n"); |
129 | 130 | ||
131 | if (cxl_is_psl8(afu)) | ||
132 | trans_fault = CXL_PSL_DSISR_TRANS; | ||
133 | if (cxl_is_psl9(afu)) | ||
134 | trans_fault = CXL_PSL9_DSISR_An_TF; | ||
135 | |||
130 | if (!cxl_ops->link_ok(afu->adapter, afu)) { | 136 | if (!cxl_ops->link_ok(afu->adapter, afu)) { |
131 | dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); | 137 | dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); |
132 | rc = -EIO; | 138 | rc = -EIO; |
@@ -158,7 +164,7 @@ int cxl_psl_purge(struct cxl_afu *afu) | |||
158 | pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", | 164 | pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", |
159 | PSL_CNTL, dsisr); | 165 | PSL_CNTL, dsisr); |
160 | 166 | ||
161 | if (dsisr & CXL_PSL_DSISR_TRANS) { | 167 | if (dsisr & trans_fault) { |
162 | dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); | 168 | dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); |
163 | dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", | 169 | dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", |
164 | dsisr, dar); | 170 | dsisr, dar); |
@@ -200,7 +206,7 @@ static int spa_max_procs(int spa_size) | |||
200 | return ((spa_size / 8) - 96) / 17; | 206 | return ((spa_size / 8) - 96) / 17; |
201 | } | 207 | } |
202 | 208 | ||
203 | int cxl_alloc_spa(struct cxl_afu *afu) | 209 | static int cxl_alloc_spa(struct cxl_afu *afu, int mode) |
204 | { | 210 | { |
205 | unsigned spa_size; | 211 | unsigned spa_size; |
206 | 212 | ||
@@ -213,7 +219,8 @@ int cxl_alloc_spa(struct cxl_afu *afu) | |||
213 | if (spa_size > 0x100000) { | 219 | if (spa_size > 0x100000) { |
214 | dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n", | 220 | dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n", |
215 | afu->native->spa_max_procs, afu->native->spa_size); | 221 | afu->native->spa_max_procs, afu->native->spa_size); |
216 | afu->num_procs = afu->native->spa_max_procs; | 222 | if (mode != CXL_MODE_DEDICATED) |
223 | afu->num_procs = afu->native->spa_max_procs; | ||
217 | break; | 224 | break; |
218 | } | 225 | } |
219 | 226 | ||
@@ -262,6 +269,36 @@ void cxl_release_spa(struct cxl_afu *afu) | |||
262 | } | 269 | } |
263 | } | 270 | } |
264 | 271 | ||
272 | /* | ||
273 | * Invalidation of all ERAT entries is no longer required by CAIA2. Use | ||
274 | * only for debug. | ||
275 | */ | ||
276 | int cxl_invalidate_all_psl9(struct cxl *adapter) | ||
277 | { | ||
278 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | ||
279 | u64 ierat; | ||
280 | |||
281 | pr_devel("CXL adapter - invalidation of all ERAT entries\n"); | ||
282 | |||
283 | /* Invalidates all ERAT entries for Radix or HPT */ | ||
284 | ierat = CXL_XSL9_IERAT_IALL; | ||
285 | if (radix_enabled()) | ||
286 | ierat |= CXL_XSL9_IERAT_INVR; | ||
287 | cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat); | ||
288 | |||
289 | while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) { | ||
290 | if (time_after_eq(jiffies, timeout)) { | ||
291 | dev_warn(&adapter->dev, | ||
292 | "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n"); | ||
293 | return -EBUSY; | ||
294 | } | ||
295 | if (!cxl_ops->link_ok(adapter, NULL)) | ||
296 | return -EIO; | ||
297 | cpu_relax(); | ||
298 | } | ||
299 | return 0; | ||
300 | } | ||
301 | |||
265 | int cxl_invalidate_all_psl8(struct cxl *adapter) | 302 | int cxl_invalidate_all_psl8(struct cxl *adapter) |
266 | { | 303 | { |
267 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | 304 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); |
@@ -498,7 +535,7 @@ static int activate_afu_directed(struct cxl_afu *afu) | |||
498 | 535 | ||
499 | afu->num_procs = afu->max_procs_virtualised; | 536 | afu->num_procs = afu->max_procs_virtualised; |
500 | if (afu->native->spa == NULL) { | 537 | if (afu->native->spa == NULL) { |
501 | if (cxl_alloc_spa(afu)) | 538 | if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED)) |
502 | return -ENOMEM; | 539 | return -ENOMEM; |
503 | } | 540 | } |
504 | attach_spa(afu); | 541 | attach_spa(afu); |
@@ -548,10 +585,19 @@ static u64 calculate_sr(struct cxl_context *ctx) | |||
548 | sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV; | 585 | sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV; |
549 | } else { | 586 | } else { |
550 | sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; | 587 | sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; |
551 | sr &= ~(CXL_PSL_SR_An_HV); | 588 | if (radix_enabled()) |
589 | sr |= CXL_PSL_SR_An_HV; | ||
590 | else | ||
591 | sr &= ~(CXL_PSL_SR_An_HV); | ||
552 | if (!test_tsk_thread_flag(current, TIF_32BIT)) | 592 | if (!test_tsk_thread_flag(current, TIF_32BIT)) |
553 | sr |= CXL_PSL_SR_An_SF; | 593 | sr |= CXL_PSL_SR_An_SF; |
554 | } | 594 | } |
595 | if (cxl_is_psl9(ctx->afu)) { | ||
596 | if (radix_enabled()) | ||
597 | sr |= CXL_PSL_SR_An_XLAT_ror; | ||
598 | else | ||
599 | sr |= CXL_PSL_SR_An_XLAT_hpt; | ||
600 | } | ||
555 | return sr; | 601 | return sr; |
556 | } | 602 | } |
557 | 603 | ||
@@ -584,6 +630,70 @@ static void update_ivtes_directed(struct cxl_context *ctx) | |||
584 | WARN_ON(add_process_element(ctx)); | 630 | WARN_ON(add_process_element(ctx)); |
585 | } | 631 | } |
586 | 632 | ||
633 | static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr) | ||
634 | { | ||
635 | u32 pid; | ||
636 | |||
637 | cxl_assign_psn_space(ctx); | ||
638 | |||
639 | ctx->elem->ctxtime = 0; /* disable */ | ||
640 | ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); | ||
641 | ctx->elem->haurp = 0; /* disable */ | ||
642 | |||
643 | if (ctx->kernel) | ||
644 | pid = 0; | ||
645 | else { | ||
646 | if (ctx->mm == NULL) { | ||
647 | pr_devel("%s: unable to get mm for pe=%d pid=%i\n", | ||
648 | __func__, ctx->pe, pid_nr(ctx->pid)); | ||
649 | return -EINVAL; | ||
650 | } | ||
651 | pid = ctx->mm->context.id; | ||
652 | } | ||
653 | |||
654 | ctx->elem->common.tid = 0; | ||
655 | ctx->elem->common.pid = cpu_to_be32(pid); | ||
656 | |||
657 | ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); | ||
658 | |||
659 | ctx->elem->common.csrp = 0; /* disable */ | ||
660 | |||
661 | cxl_prefault(ctx, wed); | ||
662 | |||
663 | /* | ||
664 | * Ensure we have the multiplexed PSL interrupt set up to take faults | ||
665 | * for kernel contexts that may not have allocated any AFU IRQs at all: | ||
666 | */ | ||
667 | if (ctx->irqs.range[0] == 0) { | ||
668 | ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; | ||
669 | ctx->irqs.range[0] = 1; | ||
670 | } | ||
671 | |||
672 | ctx->elem->common.amr = cpu_to_be64(amr); | ||
673 | ctx->elem->common.wed = cpu_to_be64(wed); | ||
674 | |||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr) | ||
679 | { | ||
680 | int result; | ||
681 | |||
682 | /* fill the process element entry */ | ||
683 | result = process_element_entry_psl9(ctx, wed, amr); | ||
684 | if (result) | ||
685 | return result; | ||
686 | |||
687 | update_ivtes_directed(ctx); | ||
688 | |||
689 | /* first guy needs to enable */ | ||
690 | result = cxl_ops->afu_check_and_enable(ctx->afu); | ||
691 | if (result) | ||
692 | return result; | ||
693 | |||
694 | return add_process_element(ctx); | ||
695 | } | ||
696 | |||
587 | int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr) | 697 | int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr) |
588 | { | 698 | { |
589 | u32 pid; | 699 | u32 pid; |
@@ -594,7 +704,7 @@ int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr) | |||
594 | ctx->elem->ctxtime = 0; /* disable */ | 704 | ctx->elem->ctxtime = 0; /* disable */ |
595 | ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); | 705 | ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); |
596 | ctx->elem->haurp = 0; /* disable */ | 706 | ctx->elem->haurp = 0; /* disable */ |
597 | ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); | 707 | ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1)); |
598 | 708 | ||
599 | pid = current->pid; | 709 | pid = current->pid; |
600 | if (ctx->kernel) | 710 | if (ctx->kernel) |
@@ -605,13 +715,13 @@ int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr) | |||
605 | ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); | 715 | ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); |
606 | 716 | ||
607 | ctx->elem->common.csrp = 0; /* disable */ | 717 | ctx->elem->common.csrp = 0; /* disable */ |
608 | ctx->elem->common.aurp0 = 0; /* disable */ | 718 | ctx->elem->common.u.psl8.aurp0 = 0; /* disable */ |
609 | ctx->elem->common.aurp1 = 0; /* disable */ | 719 | ctx->elem->common.u.psl8.aurp1 = 0; /* disable */ |
610 | 720 | ||
611 | cxl_prefault(ctx, wed); | 721 | cxl_prefault(ctx, wed); |
612 | 722 | ||
613 | ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0); | 723 | ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); |
614 | ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1); | 724 | ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); |
615 | 725 | ||
616 | /* | 726 | /* |
617 | * Ensure we have the multiplexed PSL interrupt set up to take faults | 727 | * Ensure we have the multiplexed PSL interrupt set up to take faults |
@@ -677,6 +787,32 @@ static int deactivate_afu_directed(struct cxl_afu *afu) | |||
677 | return 0; | 787 | return 0; |
678 | } | 788 | } |
679 | 789 | ||
790 | int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu) | ||
791 | { | ||
792 | dev_info(&afu->dev, "Activating dedicated process mode\n"); | ||
793 | |||
794 | /* | ||
795 | * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the | ||
796 | * XSL and AFU are programmed to work with a single context. | ||
797 | * The context information should be configured in the SPA area | ||
798 | * index 0 (so PSL_SPAP must be configured before enabling the | ||
799 | * AFU). | ||
800 | */ | ||
801 | afu->num_procs = 1; | ||
802 | if (afu->native->spa == NULL) { | ||
803 | if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED)) | ||
804 | return -ENOMEM; | ||
805 | } | ||
806 | attach_spa(afu); | ||
807 | |||
808 | cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process); | ||
809 | cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); | ||
810 | |||
811 | afu->current_mode = CXL_MODE_DEDICATED; | ||
812 | |||
813 | return cxl_chardev_d_afu_add(afu); | ||
814 | } | ||
815 | |||
680 | int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu) | 816 | int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu) |
681 | { | 817 | { |
682 | dev_info(&afu->dev, "Activating dedicated process mode\n"); | 818 | dev_info(&afu->dev, "Activating dedicated process mode\n"); |
@@ -700,6 +836,16 @@ int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu) | |||
700 | return cxl_chardev_d_afu_add(afu); | 836 | return cxl_chardev_d_afu_add(afu); |
701 | } | 837 | } |
702 | 838 | ||
839 | void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx) | ||
840 | { | ||
841 | int r; | ||
842 | |||
843 | for (r = 0; r < CXL_IRQ_RANGES; r++) { | ||
844 | ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); | ||
845 | ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); | ||
846 | } | ||
847 | } | ||
848 | |||
703 | void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx) | 849 | void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx) |
704 | { | 850 | { |
705 | struct cxl_afu *afu = ctx->afu; | 851 | struct cxl_afu *afu = ctx->afu; |
@@ -716,6 +862,26 @@ void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx) | |||
716 | ((u64)ctx->irqs.range[3] & 0xffff)); | 862 | ((u64)ctx->irqs.range[3] & 0xffff)); |
717 | } | 863 | } |
718 | 864 | ||
865 | int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr) | ||
866 | { | ||
867 | struct cxl_afu *afu = ctx->afu; | ||
868 | int result; | ||
869 | |||
870 | /* fill the process element entry */ | ||
871 | result = process_element_entry_psl9(ctx, wed, amr); | ||
872 | if (result) | ||
873 | return result; | ||
874 | |||
875 | if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes) | ||
876 | afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx); | ||
877 | |||
878 | result = cxl_ops->afu_reset(afu); | ||
879 | if (result) | ||
880 | return result; | ||
881 | |||
882 | return afu_enable(afu); | ||
883 | } | ||
884 | |||
719 | int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr) | 885 | int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr) |
720 | { | 886 | { |
721 | struct cxl_afu *afu = ctx->afu; | 887 | struct cxl_afu *afu = ctx->afu; |
@@ -887,6 +1053,21 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) | |||
887 | return 0; | 1053 | return 0; |
888 | } | 1054 | } |
889 | 1055 | ||
1056 | void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx) | ||
1057 | { | ||
1058 | u64 fir1, fir2, serr; | ||
1059 | |||
1060 | fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1); | ||
1061 | fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR2); | ||
1062 | |||
1063 | dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); | ||
1064 | dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); | ||
1065 | if (ctx->afu->adapter->native->sl_ops->register_serr_irq) { | ||
1066 | serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); | ||
1067 | cxl_afu_decode_psl_serr(ctx->afu, serr); | ||
1068 | } | ||
1069 | } | ||
1070 | |||
890 | void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx) | 1071 | void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx) |
891 | { | 1072 | { |
892 | u64 fir1, fir2, fir_slice, serr, afu_debug; | 1073 | u64 fir1, fir2, fir_slice, serr, afu_debug; |
@@ -923,9 +1104,20 @@ static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, | |||
923 | return cxl_ops->ack_irq(ctx, 0, errstat); | 1104 | return cxl_ops->ack_irq(ctx, 0, errstat); |
924 | } | 1105 | } |
925 | 1106 | ||
1107 | static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr) | ||
1108 | { | ||
1109 | if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS)) | ||
1110 | return true; | ||
1111 | |||
1112 | if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF)) | ||
1113 | return true; | ||
1114 | |||
1115 | return false; | ||
1116 | } | ||
1117 | |||
926 | irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info) | 1118 | irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info) |
927 | { | 1119 | { |
928 | if (irq_info->dsisr & CXL_PSL_DSISR_TRANS) | 1120 | if (cxl_is_translation_fault(afu, irq_info->dsisr)) |
929 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); | 1121 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); |
930 | else | 1122 | else |
931 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); | 1123 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); |
@@ -994,6 +1186,9 @@ static void native_irq_wait(struct cxl_context *ctx) | |||
994 | if (cxl_is_psl8(ctx->afu) && | 1186 | if (cxl_is_psl8(ctx->afu) && |
995 | ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) | 1187 | ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) |
996 | return; | 1188 | return; |
1189 | if (cxl_is_psl9(ctx->afu) && | ||
1190 | ((dsisr & CXL_PSL9_DSISR_PENDING) == 0)) | ||
1191 | return; | ||
997 | /* | 1192 | /* |
998 | * We are waiting for the workqueue to process our | 1193 | * We are waiting for the workqueue to process our |
999 | * irq, so need to let that run here. | 1194 | * irq, so need to let that run here. |
@@ -1122,6 +1317,13 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu) | |||
1122 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | 1317 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); |
1123 | if (cxl_is_power8()) | 1318 | if (cxl_is_power8()) |
1124 | serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); | 1319 | serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); |
1320 | if (cxl_is_power9()) { | ||
1321 | /* | ||
1322 | * By default, all errors are masked. So don't set all masks. | ||
1323 | * Slice errors will be transfered. | ||
1324 | */ | ||
1325 | serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff); | ||
1326 | } | ||
1125 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | 1327 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); |
1126 | 1328 | ||
1127 | return 0; | 1329 | return 0; |
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 4a1b407ca514..976956f3fc9e 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
@@ -60,7 +60,7 @@ | |||
60 | #define CXL_VSEC_PROTOCOL_MASK 0xe0 | 60 | #define CXL_VSEC_PROTOCOL_MASK 0xe0 |
61 | #define CXL_VSEC_PROTOCOL_1024TB 0x80 | 61 | #define CXL_VSEC_PROTOCOL_1024TB 0x80 |
62 | #define CXL_VSEC_PROTOCOL_512TB 0x40 | 62 | #define CXL_VSEC_PROTOCOL_512TB 0x40 |
63 | #define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8 uses this */ | 63 | #define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8/9 uses this */ |
64 | #define CXL_VSEC_PROTOCOL_ENABLE 0x01 | 64 | #define CXL_VSEC_PROTOCOL_ENABLE 0x01 |
65 | 65 | ||
66 | #define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \ | 66 | #define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \ |
@@ -326,14 +326,18 @@ static void dump_afu_descriptor(struct cxl_afu *afu) | |||
326 | 326 | ||
327 | #define P8_CAPP_UNIT0_ID 0xBA | 327 | #define P8_CAPP_UNIT0_ID 0xBA |
328 | #define P8_CAPP_UNIT1_ID 0XBE | 328 | #define P8_CAPP_UNIT1_ID 0XBE |
329 | #define P9_CAPP_UNIT0_ID 0xC0 | ||
330 | #define P9_CAPP_UNIT1_ID 0xE0 | ||
329 | 331 | ||
330 | static u64 get_capp_unit_id(struct device_node *np) | 332 | static int get_phb_index(struct device_node *np, u32 *phb_index) |
331 | { | 333 | { |
332 | u32 phb_index; | 334 | if (of_property_read_u32(np, "ibm,phb-index", phb_index)) |
333 | 335 | return -ENODEV; | |
334 | if (of_property_read_u32(np, "ibm,phb-index", &phb_index)) | 336 | return 0; |
335 | return 0; | 337 | } |
336 | 338 | ||
339 | static u64 get_capp_unit_id(struct device_node *np, u32 phb_index) | ||
340 | { | ||
337 | /* | 341 | /* |
338 | * POWER 8: | 342 | * POWER 8: |
339 | * - For chips other than POWER8NVL, we only have CAPP 0, | 343 | * - For chips other than POWER8NVL, we only have CAPP 0, |
@@ -352,11 +356,27 @@ static u64 get_capp_unit_id(struct device_node *np) | |||
352 | return P8_CAPP_UNIT1_ID; | 356 | return P8_CAPP_UNIT1_ID; |
353 | } | 357 | } |
354 | 358 | ||
359 | /* | ||
360 | * POWER 9: | ||
361 | * PEC0 (PHB0). Capp ID = CAPP0 (0b1100_0000) | ||
362 | * PEC1 (PHB1 - PHB2). No capi mode | ||
363 | * PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000) | ||
364 | */ | ||
365 | if (cxl_is_power9()) { | ||
366 | if (phb_index == 0) | ||
367 | return P9_CAPP_UNIT0_ID; | ||
368 | |||
369 | if (phb_index == 3) | ||
370 | return P9_CAPP_UNIT1_ID; | ||
371 | } | ||
372 | |||
355 | return 0; | 373 | return 0; |
356 | } | 374 | } |
357 | 375 | ||
358 | static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id) | 376 | static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, |
377 | u32 *phb_index, u64 *capp_unit_id) | ||
359 | { | 378 | { |
379 | int rc; | ||
360 | struct device_node *np; | 380 | struct device_node *np; |
361 | const __be32 *prop; | 381 | const __be32 *prop; |
362 | 382 | ||
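The POWER9 PHB-index-to-CAPP mapping introduced above is small enough to exercise in a stand-alone sketch. The helper below mirrors only the logic visible in this hunk; any PHB index other than 0 or 3 yields 0, which calc_capp_routing() then rejects as an invalid CAPP unit id.

#include <stdint.h>
#include <stdio.h>

#define P9_CAPP_UNIT0_ID 0xC0          /* PEC0 / PHB0 */
#define P9_CAPP_UNIT1_ID 0xE0          /* PEC2 / PHB3 */

/* Mirrors just the POWER9 branch of get_capp_unit_id(). */
static uint64_t p9_capp_unit_id(uint32_t phb_index)
{
        if (phb_index == 0)
                return P9_CAPP_UNIT0_ID;
        if (phb_index == 3)
                return P9_CAPP_UNIT1_ID;
        return 0;                       /* no CAPI mode on this PHB */
}

int main(void)
{
        for (uint32_t phb = 0; phb <= 5; phb++)
                printf("phb-index %u -> capp unit id 0x%llx\n",
                       phb, (unsigned long long)p9_capp_unit_id(phb));
        return 0;
}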
@@ -367,8 +387,16 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id | |||
367 | np = of_get_next_parent(np); | 387 | np = of_get_next_parent(np); |
368 | if (!np) | 388 | if (!np) |
369 | return -ENODEV; | 389 | return -ENODEV; |
390 | |||
370 | *chipid = be32_to_cpup(prop); | 391 | *chipid = be32_to_cpup(prop); |
371 | *capp_unit_id = get_capp_unit_id(np); | 392 | |
393 | rc = get_phb_index(np, phb_index); | ||
394 | if (rc) { | ||
395 | pr_err("cxl: invalid phb index\n"); | ||
396 | return rc; | ||
397 | } | ||
398 | |||
399 | *capp_unit_id = get_capp_unit_id(np, *phb_index); | ||
372 | of_node_put(np); | 400 | of_node_put(np); |
373 | if (!*capp_unit_id) { | 401 | if (!*capp_unit_id) { |
374 | pr_err("cxl: invalid capp unit id\n"); | 402 | pr_err("cxl: invalid capp unit id\n"); |
@@ -378,14 +406,104 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id | |||
378 | return 0; | 406 | return 0; |
379 | } | 407 | } |
380 | 408 | ||
409 | static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci_dev *dev) | ||
410 | { | ||
411 | u64 xsl_dsnctl, psl_fircntl; | ||
412 | u64 chipid; | ||
413 | u32 phb_index; | ||
414 | u64 capp_unit_id; | ||
415 | int rc; | ||
416 | |||
417 | rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); | ||
418 | if (rc) | ||
419 | return rc; | ||
420 | |||
421 | /* | ||
422 | * CAPI Identifier bits [0:7] | ||
423 | * bit 61:60 MSI bits --> 0 | ||
424 | * bit 59 TVT selector --> 0 | ||
425 | */ | ||
426 | |||
427 | /* | ||
428 | * Tell XSL where to route data to. | ||
429 | * The field chipid should match the PHB CAPI_CMPM register | ||
430 | */ | ||
431 | xsl_dsnctl = ((u64)0x2 << (63-7)); /* Bit 57 */ | ||
432 | xsl_dsnctl |= (capp_unit_id << (63-15)); | ||
433 | |||
434 | /* nMMU_ID Defaults to: b'000001001' */ | ||
435 | xsl_dsnctl |= ((u64)0x09 << (63-28)); | ||
436 | |||
437 | if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) { | ||
438 | /* | ||
439 | * Used to identify CAPI packets which should be sorted into | ||
440 | * the Non-Blocking queues by the PHB. This field should match | ||
441 | * the PHB PBL_NBW_CMPM register | ||
442 | * nbwind=0x03, bits [57:58], must include capi indicator. | ||
443 | * Not supported on P9 DD1. | ||
444 | */ | ||
445 | xsl_dsnctl |= ((u64)0x03 << (63-47)); | ||
446 | |||
447 | /* | ||
448 | * Upper 16b address bits of ASB_Notify messages sent to the | ||
449 | * system. Need to match the PHB's ASN Compare/Mask Register. | ||
450 | * Not supported on P9 DD1. | ||
451 | */ | ||
452 | xsl_dsnctl |= ((u64)0x04 << (63-55)); | ||
453 | } | ||
454 | |||
455 | cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl); | ||
456 | |||
457 | /* Set fir_cntl to recommended value for production env */ | ||
458 | psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ | ||
459 | psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ | ||
460 | psl_fircntl |= 0x1ULL; /* ce_thresh */ | ||
461 | cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl); | ||
462 | |||
463 | /* vccredits=0x1 pcklat=0x4 */ | ||
464 | cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0000000000001810ULL); | ||
465 | |||
466 | /* | ||
467 | * For debugging with trace arrays. | ||
468 | * Configure RX trace 0 segmented mode. | ||
469 | * Configure CT trace 0 segmented mode. | ||
470 | * Configure LA0 trace 0 segmented mode. | ||
471 | * Configure LA1 trace 0 segmented mode. | ||
472 | */ | ||
473 | cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000000ULL); | ||
474 | cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000003ULL); | ||
475 | cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000005ULL); | ||
476 | cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000006ULL); | ||
477 | |||
478 | /* | ||
479 | * A response to an ASB_Notify request is returned by the | ||
480 | * system as an MMIO write to the address defined in | ||
481 | * the PSL_TNR_ADDR register | ||
482 | */ | ||
483 | /* PSL_TNR_ADDR */ | ||
484 | |||
485 | /* NORST */ | ||
486 | cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL); | ||
487 | |||
488 | /* allocate the apc machines */ | ||
489 | cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL); | ||
490 | |||
491 | /* Disable vc dd1 fix */ | ||
492 | if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1))) | ||
493 | cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); | ||
494 | |||
495 | return 0; | ||
496 | } | ||
497 | |||
381 | static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev) | 498 | static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev) |
382 | { | 499 | { |
383 | u64 psl_dsnctl, psl_fircntl; | 500 | u64 psl_dsnctl, psl_fircntl; |
384 | u64 chipid; | 501 | u64 chipid; |
502 | u32 phb_index; | ||
385 | u64 capp_unit_id; | 503 | u64 capp_unit_id; |
386 | int rc; | 504 | int rc; |
387 | 505 | ||
388 | rc = calc_capp_routing(dev, &chipid, &capp_unit_id); | 506 | rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); |
389 | if (rc) | 507 | if (rc) |
390 | return rc; | 508 | return rc; |
391 | 509 | ||
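The (63 - n) shifts used when building xsl_dsnctl in init_implementation_adapter_regs_psl9() follow big-endian bit numbering: x << (63 - n) places the least significant bit of x at bit position n counted from the MSB. A stand-alone sketch of how the visible fields compose, using P9_CAPP_UNIT0_ID as a sample capp_unit_id; the field interpretations in the comments come from the patch, the sample value is an assumption.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t capp_unit_id = 0xC0;   /* sample value: P9 CAPP0 */
        uint64_t xsl_dsnctl;

        xsl_dsnctl  = (uint64_t)0x2 << (63 - 7);   /* routing field, "Bit 57" in the patch */
        xsl_dsnctl |= capp_unit_id << (63 - 15);   /* CAPP unit id */
        xsl_dsnctl |= (uint64_t)0x09 << (63 - 28); /* default nMMU_ID */

        /* non-DD1 additions: nbwind and ASB_Notify upper address bits */
        xsl_dsnctl |= (uint64_t)0x03 << (63 - 47);
        xsl_dsnctl |= (uint64_t)0x04 << (63 - 55);

        /* prints 0x02c0004800030400 for the sample capp_unit_id */
        printf("XSL9_DSNCTL = 0x%016llx\n", (unsigned long long)xsl_dsnctl);
        return 0;
}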
@@ -414,10 +532,11 @@ static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_ | |||
414 | { | 532 | { |
415 | u64 xsl_dsnctl; | 533 | u64 xsl_dsnctl; |
416 | u64 chipid; | 534 | u64 chipid; |
535 | u32 phb_index; | ||
417 | u64 capp_unit_id; | 536 | u64 capp_unit_id; |
418 | int rc; | 537 | int rc; |
419 | 538 | ||
420 | rc = calc_capp_routing(dev, &chipid, &capp_unit_id); | 539 | rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); |
421 | if (rc) | 540 | if (rc) |
422 | return rc; | 541 | return rc; |
423 | 542 | ||
@@ -435,6 +554,12 @@ static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_ | |||
435 | /* For the PSL this is a multiple for 0 < n <= 7: */ | 554 | /* For the PSL this is a multiple for 0 < n <= 7: */ |
436 | #define PSL_2048_250MHZ_CYCLES 1 | 555 | #define PSL_2048_250MHZ_CYCLES 1 |
437 | 556 | ||
557 | static void write_timebase_ctrl_psl9(struct cxl *adapter) | ||
558 | { | ||
559 | cxl_p1_write(adapter, CXL_PSL9_TB_CTLSTAT, | ||
560 | TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)); | ||
561 | } | ||
562 | |||
438 | static void write_timebase_ctrl_psl8(struct cxl *adapter) | 563 | static void write_timebase_ctrl_psl8(struct cxl *adapter) |
439 | { | 564 | { |
440 | cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, | 565 | cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, |
@@ -456,6 +581,11 @@ static void write_timebase_ctrl_xsl(struct cxl *adapter) | |||
456 | TBSYNC_CNT(XSL_4000_CLOCKS)); | 581 | TBSYNC_CNT(XSL_4000_CLOCKS)); |
457 | } | 582 | } |
458 | 583 | ||
584 | static u64 timebase_read_psl9(struct cxl *adapter) | ||
585 | { | ||
586 | return cxl_p1_read(adapter, CXL_PSL9_Timebase); | ||
587 | } | ||
588 | |||
459 | static u64 timebase_read_psl8(struct cxl *adapter) | 589 | static u64 timebase_read_psl8(struct cxl *adapter) |
460 | { | 590 | { |
461 | return cxl_p1_read(adapter, CXL_PSL_Timebase); | 591 | return cxl_p1_read(adapter, CXL_PSL_Timebase); |
@@ -514,6 +644,11 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) | |||
514 | return; | 644 | return; |
515 | } | 645 | } |
516 | 646 | ||
647 | static int init_implementation_afu_regs_psl9(struct cxl_afu *afu) | ||
648 | { | ||
649 | return 0; | ||
650 | } | ||
651 | |||
517 | static int init_implementation_afu_regs_psl8(struct cxl_afu *afu) | 652 | static int init_implementation_afu_regs_psl8(struct cxl_afu *afu) |
518 | { | 653 | { |
519 | /* read/write masks for this slice */ | 654 | /* read/write masks for this slice */ |
@@ -612,7 +747,7 @@ static int setup_cxl_bars(struct pci_dev *dev) | |||
612 | /* | 747 | /* |
613 | * BAR 4/5 has a special meaning for CXL and must be programmed with a | 748 | * BAR 4/5 has a special meaning for CXL and must be programmed with a |
614 | * special value corresponding to the CXL protocol address range. | 749 | * special value corresponding to the CXL protocol address range. |
615 | * For POWER 8 that means bits 48:49 must be set to 10 | 750 | * For POWER 8/9 that means bits 48:49 must be set to 10 |
616 | */ | 751 | */ |
617 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000); | 752 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000); |
618 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000); | 753 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000); |
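The BAR 4/5 values written above can be checked with a few lines of arithmetic: BAR4 holds the low 32 bits and BAR5 the high 32 bits of the 64-bit BAR, so 0x00020000 in BAR5 sets bit 49 of the resulting address and leaves bit 48 clear, giving the 10 pattern the comment asks for. A stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* BAR4 = low dword, BAR5 = high dword of the 64-bit BAR */
        uint64_t bar = ((uint64_t)0x00020000 << 32) | 0x00000000;

        printf("BAR = 0x%016llx, bit49=%llu, bit48=%llu\n",
               (unsigned long long)bar,
               (unsigned long long)((bar >> 49) & 1),
               (unsigned long long)((bar >> 48) & 1));
        return 0;
}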
@@ -997,6 +1132,52 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) | |||
997 | return 0; | 1132 | return 0; |
998 | } | 1133 | } |
999 | 1134 | ||
1135 | static int sanitise_afu_regs_psl9(struct cxl_afu *afu) | ||
1136 | { | ||
1137 | u64 reg; | ||
1138 | |||
1139 | /* | ||
1140 | * Clear out any regs that contain either an IVTE or address or may be | ||
1141 | * waiting on an acknowledgment to try to be a bit safer as we bring | ||
1142 | * it online | ||
1143 | */ | ||
1144 | reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | ||
1145 | if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { | ||
1146 | dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); | ||
1147 | if (cxl_ops->afu_reset(afu)) | ||
1148 | return -EIO; | ||
1149 | if (cxl_afu_disable(afu)) | ||
1150 | return -EIO; | ||
1151 | if (cxl_psl_purge(afu)) | ||
1152 | return -EIO; | ||
1153 | } | ||
1154 | cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000); | ||
1155 | cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000); | ||
1156 | reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An); | ||
1157 | if (reg) { | ||
1158 | dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); | ||
1159 | if (reg & CXL_PSL9_DSISR_An_TF) | ||
1160 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); | ||
1161 | else | ||
1162 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); | ||
1163 | } | ||
1164 | if (afu->adapter->native->sl_ops->register_serr_irq) { | ||
1165 | reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
1166 | if (reg) { | ||
1167 | if (reg & ~0x000000007fffffff) | ||
1168 | dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); | ||
1169 | cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); | ||
1170 | } | ||
1171 | } | ||
1172 | reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); | ||
1173 | if (reg) { | ||
1174 | dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); | ||
1175 | cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg); | ||
1176 | } | ||
1177 | |||
1178 | return 0; | ||
1179 | } | ||
1180 | |||
1000 | static int sanitise_afu_regs_psl8(struct cxl_afu *afu) | 1181 | static int sanitise_afu_regs_psl8(struct cxl_afu *afu) |
1001 | { | 1182 | { |
1002 | u64 reg; | 1183 | u64 reg; |
@@ -1254,10 +1435,10 @@ int cxl_pci_reset(struct cxl *adapter) | |||
1254 | 1435 | ||
1255 | /* | 1436 | /* |
1256 | * The adapter is about to be reset, so ignore errors. | 1437 | * The adapter is about to be reset, so ignore errors. |
1257 | * Not supported on P9 DD1 but don't forget to enable it | 1438 | * Not supported on P9 DD1 |
1258 | * on P9 DD2 | ||
1259 | */ | 1439 | */ |
1260 | if (cxl_is_power8()) | 1440 | if ((cxl_is_power8()) || |
1441 | ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)))) | ||
1261 | cxl_data_cache_flush(adapter); | 1442 | cxl_data_cache_flush(adapter); |
1262 | 1443 | ||
1263 | /* pcie_warm_reset requests a fundamental pci reset which includes a | 1444 | /* pcie_warm_reset requests a fundamental pci reset which includes a |
@@ -1393,6 +1574,9 @@ static bool cxl_compatible_caia_version(struct cxl *adapter) | |||
1393 | if (cxl_is_power8() && (adapter->caia_major == 1)) | 1574 | if (cxl_is_power8() && (adapter->caia_major == 1)) |
1394 | return true; | 1575 | return true; |
1395 | 1576 | ||
1577 | if (cxl_is_power9() && (adapter->caia_major == 2)) | ||
1578 | return true; | ||
1579 | |||
1396 | return false; | 1580 | return false; |
1397 | } | 1581 | } |
1398 | 1582 | ||
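The compatibility rule added above (CAIA 1 cards pair with POWER8, CAIA 2 cards with POWER9) can be tabulated with a small stand-alone mock; the function below restates the check with booleans in place of the cxl_is_power8()/cxl_is_power9() helpers, purely for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Stand-alone restatement of cxl_compatible_caia_version(), for illustration. */
static bool compatible(bool power8, bool power9, int caia_major)
{
        if (power8 && caia_major == 1)
                return true;
        if (power9 && caia_major == 2)
                return true;
        return false;
}

int main(void)
{
        printf("POWER8 + CAIA 1: %d\n", compatible(true, false, 1));
        printf("POWER8 + CAIA 2: %d\n", compatible(true, false, 2));
        printf("POWER9 + CAIA 1: %d\n", compatible(false, true, 1));
        printf("POWER9 + CAIA 2: %d\n", compatible(false, true, 2));
        return 0;
}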
@@ -1460,8 +1644,12 @@ static int sanitise_adapter_regs(struct cxl *adapter) | |||
1460 | /* Clear PSL tberror bit by writing 1 to it */ | 1644 | /* Clear PSL tberror bit by writing 1 to it */ |
1461 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror); | 1645 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror); |
1462 | 1646 | ||
1463 | if (adapter->native->sl_ops->invalidate_all) | 1647 | if (adapter->native->sl_ops->invalidate_all) { |
1648 | /* do not invalidate ERAT entries when not reloading on PERST */ | ||
1649 | if (cxl_is_power9() && (adapter->perst_loads_image)) | ||
1650 | return 0; | ||
1464 | rc = adapter->native->sl_ops->invalidate_all(adapter); | 1651 | rc = adapter->native->sl_ops->invalidate_all(adapter); |
1652 | } | ||
1465 | 1653 | ||
1466 | return rc; | 1654 | return rc; |
1467 | } | 1655 | } |
@@ -1546,6 +1734,30 @@ static void cxl_deconfigure_adapter(struct cxl *adapter) | |||
1546 | pci_disable_device(pdev); | 1734 | pci_disable_device(pdev); |
1547 | } | 1735 | } |
1548 | 1736 | ||
1737 | static const struct cxl_service_layer_ops psl9_ops = { | ||
1738 | .adapter_regs_init = init_implementation_adapter_regs_psl9, | ||
1739 | .invalidate_all = cxl_invalidate_all_psl9, | ||
1740 | .afu_regs_init = init_implementation_afu_regs_psl9, | ||
1741 | .sanitise_afu_regs = sanitise_afu_regs_psl9, | ||
1742 | .register_serr_irq = cxl_native_register_serr_irq, | ||
1743 | .release_serr_irq = cxl_native_release_serr_irq, | ||
1744 | .handle_interrupt = cxl_irq_psl9, | ||
1745 | .fail_irq = cxl_fail_irq_psl, | ||
1746 | .activate_dedicated_process = cxl_activate_dedicated_process_psl9, | ||
1747 | .attach_afu_directed = cxl_attach_afu_directed_psl9, | ||
1748 | .attach_dedicated_process = cxl_attach_dedicated_process_psl9, | ||
1749 | .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9, | ||
1750 | .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9, | ||
1751 | .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9, | ||
1752 | .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, | ||
1753 | .err_irq_dump_registers = cxl_native_err_irq_dump_regs, | ||
1754 | .debugfs_stop_trace = cxl_stop_trace_psl9, | ||
1755 | .write_timebase_ctrl = write_timebase_ctrl_psl9, | ||
1756 | .timebase_read = timebase_read_psl9, | ||
1757 | .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, | ||
1758 | .needs_reset_before_disable = true, | ||
1759 | }; | ||
1760 | |||
1549 | static const struct cxl_service_layer_ops psl8_ops = { | 1761 | static const struct cxl_service_layer_ops psl8_ops = { |
1550 | .adapter_regs_init = init_implementation_adapter_regs_psl8, | 1762 | .adapter_regs_init = init_implementation_adapter_regs_psl8, |
1551 | .invalidate_all = cxl_invalidate_all_psl8, | 1763 | .invalidate_all = cxl_invalidate_all_psl8, |
@@ -1597,6 +1809,9 @@ static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) | |||
1597 | if (cxl_is_power8()) { | 1809 | if (cxl_is_power8()) { |
1598 | dev_info(&dev->dev, "Device uses a PSL8\n"); | 1810 | dev_info(&dev->dev, "Device uses a PSL8\n"); |
1599 | adapter->native->sl_ops = &psl8_ops; | 1811 | adapter->native->sl_ops = &psl8_ops; |
1812 | } else { | ||
1813 | dev_info(&dev->dev, "Device uses a PSL9\n"); | ||
1814 | adapter->native->sl_ops = &psl9_ops; | ||
1600 | } | 1815 | } |
1601 | } | 1816 | } |
1602 | } | 1817 | } |
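The psl9_ops table above is consumed through the same sl_ops indirection the rest of the driver already uses; the fragment below sketches that dispatch pattern. The wrapper name and the null check are assumptions; only the adapter->native->sl_ops path and the adapter_regs_init signature come from this patch.

/* Sketch of the dispatch pattern only, not a call site from the driver. */
static int example_adapter_regs_init(struct cxl *adapter, struct pci_dev *dev)
{
        /* set_sl_ops() has already picked psl8_ops or psl9_ops */
        if (adapter->native->sl_ops->adapter_regs_init)
                return adapter->native->sl_ops->adapter_regs_init(adapter, dev);
        return 0;
}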
@@ -1667,8 +1882,13 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) | |||
1667 | cxl_sysfs_adapter_remove(adapter); | 1882 | cxl_sysfs_adapter_remove(adapter); |
1668 | cxl_debugfs_adapter_remove(adapter); | 1883 | cxl_debugfs_adapter_remove(adapter); |
1669 | 1884 | ||
1670 | /* Flush adapter datacache as its about to be removed */ | 1885 | /* |
1671 | cxl_data_cache_flush(adapter); | 1886 | * Flush adapter datacache as its about to be removed. |
1887 | * Not supported on P9 DD1. | ||
1888 | */ | ||
1889 | if ((cxl_is_power8()) || | ||
1890 | ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)))) | ||
1891 | cxl_data_cache_flush(adapter); | ||
1672 | 1892 | ||
1673 | cxl_deconfigure_adapter(adapter); | 1893 | cxl_deconfigure_adapter(adapter); |
1674 | 1894 | ||
@@ -1752,6 +1972,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1752 | return -ENODEV; | 1972 | return -ENODEV; |
1753 | } | 1973 | } |
1754 | 1974 | ||
1975 | if (cxl_is_power9() && !radix_enabled()) { | ||
1976 | dev_info(&dev->dev, "Only Radix mode supported\n"); | ||
1977 | return -ENODEV; | ||
1978 | } | ||
1979 | |||
1755 | if (cxl_verbose) | 1980 | if (cxl_verbose) |
1756 | dump_cxl_config_space(dev); | 1981 | dump_cxl_config_space(dev); |
1757 | 1982 | ||
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h index 751d6119683e..b8e300af0e55 100644 --- a/drivers/misc/cxl/trace.h +++ b/drivers/misc/cxl/trace.h | |||
@@ -17,6 +17,15 @@ | |||
17 | 17 | ||
18 | #include "cxl.h" | 18 | #include "cxl.h" |
19 | 19 | ||
20 | #define dsisr_psl9_flags(flags) \ | ||
21 | __print_flags(flags, "|", \ | ||
22 | { CXL_PSL9_DSISR_An_CO_MASK, "FR" }, \ | ||
23 | { CXL_PSL9_DSISR_An_TF, "TF" }, \ | ||
24 | { CXL_PSL9_DSISR_An_PE, "PE" }, \ | ||
25 | { CXL_PSL9_DSISR_An_AE, "AE" }, \ | ||
26 | { CXL_PSL9_DSISR_An_OC, "OC" }, \ | ||
27 | { CXL_PSL9_DSISR_An_S, "S" }) | ||
28 | |||
20 | #define DSISR_FLAGS \ | 29 | #define DSISR_FLAGS \ |
21 | { CXL_PSL_DSISR_An_DS, "DS" }, \ | 30 | { CXL_PSL_DSISR_An_DS, "DS" }, \ |
22 | { CXL_PSL_DSISR_An_DM, "DM" }, \ | 31 | { CXL_PSL_DSISR_An_DM, "DM" }, \ |
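The dsisr_psl9_flags macro above relies on __print_flags() to turn a raw DSISR into a "TF|S"-style string in the trace output. The stand-alone mock below reproduces that rendering; the flag names match the table, but the bit values are made up, because the real CXL_PSL9_DSISR_An_* masks live in cxl.h and are not part of this hunk.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct flag { uint64_t mask; const char *name; };

/* Names from dsisr_psl9_flags(); the bit positions are invented. */
static const struct flag psl9_flags[] = {
        { 1ULL << 0, "FR" },
        { 1ULL << 1, "TF" },
        { 1ULL << 2, "PE" },
        { 1ULL << 3, "AE" },
        { 1ULL << 4, "OC" },
        { 1ULL << 5, "S"  },
};

static void print_flags(uint64_t dsisr)
{
        const char *sep = "";

        for (size_t i = 0; i < sizeof(psl9_flags) / sizeof(psl9_flags[0]); i++) {
                if (dsisr & psl9_flags[i].mask) {
                        printf("%s%s", sep, psl9_flags[i].name);
                        sep = "|";
                }
        }
        printf("\n");
}

int main(void)
{
        print_flags((1ULL << 1) | (1ULL << 5));         /* prints "TF|S" */
        return 0;
}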
@@ -154,6 +163,40 @@ TRACE_EVENT(cxl_afu_irq, | |||
154 | ) | 163 | ) |
155 | ); | 164 | ); |
156 | 165 | ||
166 | TRACE_EVENT(cxl_psl9_irq, | ||
167 | TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), | ||
168 | |||
169 | TP_ARGS(ctx, irq, dsisr, dar), | ||
170 | |||
171 | TP_STRUCT__entry( | ||
172 | __field(u8, card) | ||
173 | __field(u8, afu) | ||
174 | __field(u16, pe) | ||
175 | __field(int, irq) | ||
176 | __field(u64, dsisr) | ||
177 | __field(u64, dar) | ||
178 | ), | ||
179 | |||
180 | TP_fast_assign( | ||
181 | __entry->card = ctx->afu->adapter->adapter_num; | ||
182 | __entry->afu = ctx->afu->slice; | ||
183 | __entry->pe = ctx->pe; | ||
184 | __entry->irq = irq; | ||
185 | __entry->dsisr = dsisr; | ||
186 | __entry->dar = dar; | ||
187 | ), | ||
188 | |||
189 | TP_printk("afu%i.%i pe=%i irq=%i dsisr=0x%016llx dsisr=%s dar=0x%016llx", | ||
190 | __entry->card, | ||
191 | __entry->afu, | ||
192 | __entry->pe, | ||
193 | __entry->irq, | ||
194 | __entry->dsisr, | ||
195 | dsisr_psl9_flags(__entry->dsisr), | ||
196 | __entry->dar | ||
197 | ) | ||
198 | ); | ||
199 | |||
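TRACE_EVENT(cxl_psl9_irq, ...) generates a trace_cxl_psl9_irq() call that the PSL9 interrupt path presumably emits (cxl_irq_psl9 is the handler wired into psl9_ops earlier in this patch). The fragment below sketches such a call site; the surrounding handler body is an assumption, only the tracepoint name and argument order come from the event definition.

#include <linux/irqreturn.h>
#include "trace.h"

/* Sketch of an assumed call site, not the driver's actual handler. */
static irqreturn_t example_psl9_irq(struct cxl_context *ctx, int irq,
                                    u64 dsisr, u64 dar)
{
        trace_cxl_psl9_irq(ctx, irq, dsisr, dar);
        /* a real handler would decode dsisr and ack via CXL_PSL_TFC_An */
        return IRQ_HANDLED;
}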
157 | TRACE_EVENT(cxl_psl_irq, | 200 | TRACE_EVENT(cxl_psl_irq, |
158 | TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), | 201 | TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), |
159 | 202 | ||