Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/cxl/Makefile |   5
-rw-r--r--  drivers/misc/cxl/cxl.h    |  22
-rw-r--r--  drivers/misc/cxl/fault.c  |  11
-rw-r--r--  drivers/misc/cxl/file.c   |   7
-rw-r--r--  drivers/misc/cxl/irq.c    |   7
-rw-r--r--  drivers/misc/cxl/main.c   |   2
-rw-r--r--  drivers/misc/cxl/native.c |  39
-rw-r--r--  drivers/misc/cxl/pci.c    | 123
-rw-r--r--  drivers/misc/cxl/sysfs.c  | 236
-rw-r--r--  drivers/misc/cxl/trace.c  |  13
-rw-r--r--  drivers/misc/cxl/trace.h  | 459
11 files changed, 898 insertions, 26 deletions
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 165e98fef2c2..edb494d3ff27 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,3 +1,6 @@
-cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o
+cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o trace.o
 obj-$(CONFIG_CXL) += cxl.o
 obj-$(CONFIG_CXL_BASE) += base.o
+
+# For tracepoints to include our trace.h from tracepoint infrastructure:
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 28078f8894a5..a1cee4767ec6 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -287,6 +287,13 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
 #define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */
 #define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */
 
+/****** CXL_PSL_RXCTL_An (Implementation Specific) **************************
+ * Controls AFU Hang Pulse, which sets the timeout for the AFU to respond to
+ * the PSL for any response (except MMIO). Timeouts will occur between 1x to 2x
+ * of the hang pulse frequency.
+ */
+#define CXL_PSL_RXCTL_AFUHP_4S 0x7000000000000000ULL
+
 /* SPA->sw_command_status */
 #define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL
 #define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL
@@ -375,6 +382,10 @@ struct cxl_afu {
 	int slice;
 	int modes_supported;
 	int current_mode;
+	int crs_num;
+	u64 crs_len;
+	u64 crs_offset;
+	struct list_head crs;
 	enum prefault_modes prefault_mode;
 	bool psa;
 	bool pp_psa;
@@ -481,6 +492,8 @@ void cxl_release_one_irq(struct cxl *adapter, int hwirq);
 int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
 void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
 int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
+int cxl_update_image_control(struct cxl *adapter);
+int cxl_reset(struct cxl *adapter);
 
 /* common == phyp + powernv */
 struct cxl_process_element_common {
@@ -542,6 +555,15 @@ static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg
 #define cxl_p2n_read(afu, reg) \
 	in_be64(_cxl_p2n_addr(afu, reg))
 
+
+#define cxl_afu_cr_read64(afu, cr, off) \
+	in_le64((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+#define cxl_afu_cr_read32(afu, cr, off) \
+	in_le32((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
+u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
+
+
 struct cxl_calls {
 	void (*cxl_slbia)(struct mm_struct *mm);
 	struct module *owner;
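
The cxl_afu_cr_read* accessors above index AFU configuration records inside the AFU descriptor MMIO area: record cr starts at crs_offset + cr * crs_len, and the 16- and 8-bit variants (implemented in pci.c later in this patch) are derived from aligned 32-bit reads. As an illustration only, a caller could identify an AFU from record 0 along these lines; cxl_dump_cr0_ids() is a hypothetical helper, and PCI_VENDOR_ID/PCI_DEVICE_ID are the standard offsets from <linux/pci_regs.h>:

	/* Illustration only: read the PCI-style IDs out of AFU configuration
	 * record 0 with the accessors declared above.  Not part of this patch. */
	static void cxl_dump_cr0_ids(struct cxl_afu *afu)
	{
		u16 vendor = cxl_afu_cr_read16(afu, 0, PCI_VENDOR_ID);
		u16 device = cxl_afu_cr_read16(afu, 0, PCI_DEVICE_ID);

		dev_info(&afu->dev, "AFU CR0: vendor=0x%.4x device=0x%.4x\n",
			 vendor, device);
	}
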
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index f8684bca2d79..5286b8b704f5 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -20,6 +20,7 @@
 #include <asm/mmu.h>
 
 #include "cxl.h"
+#include "trace.h"
 
 static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
 {
@@ -75,6 +76,7 @@ static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
 
 	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
 		 sste - ctx->sstp, slb->vsid, slb->esid);
+	trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);
 
 	sste->vsid_data = cpu_to_be64(slb->vsid);
 	sste->esid_data = cpu_to_be64(slb->esid);
@@ -116,6 +118,7 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx,
 	int rc;
 
 	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
+	trace_cxl_ste_miss(ctx, ea);
 
 	if ((rc = cxl_fault_segment(ctx, mm, ea)))
 		cxl_ack_ae(ctx);
@@ -135,6 +138,8 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 	int result;
 	unsigned long access, flags, inv_flags = 0;
 
+	trace_cxl_pte_miss(ctx, dsisr, dar);
+
 	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
 		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
 		return cxl_ack_ae(ctx);
@@ -180,6 +185,12 @@ void cxl_handle_fault(struct work_struct *fault_work)
 		return;
 	}
 
+	/* Early return if the context is being / has been detached */
+	if (ctx->status == CLOSED) {
+		cxl_ack_ae(ctx);
+		return;
+	}
+
 	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
 		"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
 
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index b15d8113877c..2364bcadb9a9 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -23,6 +23,7 @@
 #include <asm/copro.h>
 
 #include "cxl.h"
+#include "trace.h"
 
 #define CXL_NUM_MINORS 256 /* Total to reserve */
 #define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
@@ -186,9 +187,13 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 	 */
 	ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
 
+	trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
+
 	if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
-			amr)))
+			amr))) {
+		afu_release_irqs(ctx);
 		goto out;
+	}
 
 	ctx->status = STARTED;
 	rc = 0;
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index c294925f73ee..c8929c526691 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -17,6 +17,7 @@
 #include <misc/cxl.h>
 
 #include "cxl.h"
+#include "trace.h"
 
 /* XXX: This is implementation specific */
 static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
@@ -100,6 +101,8 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
 	dsisr = irq_info->dsisr;
 	dar = irq_info->dar;
 
+	trace_cxl_psl_irq(ctx, irq, dsisr, dar);
+
 	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
 
 	if (dsisr & CXL_PSL_DSISR_An_DS) {
@@ -167,6 +170,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
 		}
 
 		cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
+		return IRQ_HANDLED;
 	}
 	if (dsisr & CXL_PSL_DSISR_An_OC)
 		pr_devel("CXL interrupt: OS Context Warning\n");
@@ -237,6 +241,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
 		return IRQ_HANDLED;
 	}
 
+	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
 	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
 	       afu_irq, ctx->pe, irq, hwirq);
 
@@ -436,7 +441,7 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
 	 */
 	INIT_LIST_HEAD(&ctx->irq_names);
 	for (r = 1; r < CXL_IRQ_RANGES; r++) {
-		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+		for (i = 0; i < ctx->irqs.range[r]; i++) {
 			irq_name = kmalloc(sizeof(struct cxl_irq_name),
 					   GFP_KERNEL);
 			if (!irq_name)
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 4cde9b661642..8ccddceead66 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -23,6 +23,7 @@
 #include <misc/cxl.h>
 
 #include "cxl.h"
+#include "trace.h"
 
 static DEFINE_SPINLOCK(adapter_idr_lock);
 static DEFINE_IDR(cxl_adapter_idr);
@@ -48,6 +49,7 @@ static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
 		ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);
 
 	spin_lock_irqsave(&ctx->sste_lock, flags);
+	trace_cxl_slbia(ctx);
 	memset(ctx->sstp, 0, ctx->sst_size);
 	spin_unlock_irqrestore(&ctx->sste_lock, flags);
 	mb();
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index f2b37b41a0da..29185fc61276 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -18,24 +18,28 @@
 #include <misc/cxl.h>
 
 #include "cxl.h"
+#include "trace.h"
 
 static int afu_control(struct cxl_afu *afu, u64 command,
 		       u64 result, u64 mask, bool enabled)
 {
 	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
 	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+	int rc = 0;
 
 	spin_lock(&afu->afu_cntl_lock);
 	pr_devel("AFU command starting: %llx\n", command);
 
+	trace_cxl_afu_ctrl(afu, command);
+
 	cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);
 
 	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
 	while ((AFU_Cntl & mask) != result) {
 		if (time_after_eq(jiffies, timeout)) {
 			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
-			spin_unlock(&afu->afu_cntl_lock);
-			return -EBUSY;
+			rc = -EBUSY;
+			goto out;
 		}
 		pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
 				     AFU_Cntl | command);
@@ -44,9 +48,11 @@ static int afu_control(struct cxl_afu *afu, u64 command,
 	};
 	pr_devel("AFU command complete: %llx\n", command);
 	afu->enabled = enabled;
+out:
+	trace_cxl_afu_ctrl_done(afu, command, rc);
 	spin_unlock(&afu->afu_cntl_lock);
 
-	return 0;
+	return rc;
 }
 
 static int afu_enable(struct cxl_afu *afu)
@@ -91,6 +97,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
 	u64 dsisr, dar;
 	u64 start, end;
 	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+	int rc = 0;
+
+	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);
 
 	pr_devel("PSL purge request\n");
 
@@ -107,7 +116,8 @@ int cxl_psl_purge(struct cxl_afu *afu)
 			== CXL_PSL_SCNTL_An_Ps_Pending) {
 		if (time_after_eq(jiffies, timeout)) {
 			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
-			return -EBUSY;
+			rc = -EBUSY;
+			goto out;
 		}
 		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
 		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
@@ -128,7 +138,9 @@ int cxl_psl_purge(struct cxl_afu *afu)
 
 	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
 		       PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
-	return 0;
+out:
+	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
+	return rc;
 }
 
 static int spa_max_procs(int spa_size)
@@ -185,6 +197,7 @@ static int alloc_spa(struct cxl_afu *afu)
 
 static void release_spa(struct cxl_afu *afu)
 {
+	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
 	free_pages((unsigned long) afu->spa, afu->spa_order);
 }
 
@@ -278,6 +291,9 @@ static int do_process_element_cmd(struct cxl_context *ctx,
 {
 	u64 state;
 	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+	int rc = 0;
+
+	trace_cxl_llcmd(ctx, cmd);
 
 	WARN_ON(!ctx->afu->enabled);
 
@@ -289,12 +305,14 @@ static int do_process_element_cmd(struct cxl_context *ctx,
 	while (1) {
 		if (time_after_eq(jiffies, timeout)) {
 			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
-			return -EBUSY;
+			rc = -EBUSY;
+			goto out;
 		}
 		state = be64_to_cpup(ctx->afu->sw_command_status);
 		if (state == ~0ULL) {
 			pr_err("cxl: Error adding process element to AFU\n");
-			return -1;
+			rc = -1;
+			goto out;
 		}
 		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
 		    (cmd | (cmd >> 16) | ctx->pe))
@@ -309,7 +327,9 @@ static int do_process_element_cmd(struct cxl_context *ctx,
 		schedule();
 
 	}
-	return 0;
+out:
+	trace_cxl_llcmd_done(ctx, cmd, rc);
+	return rc;
 }
 
 static int add_process_element(struct cxl_context *ctx)
@@ -629,6 +649,8 @@ static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
 
 int cxl_detach_process(struct cxl_context *ctx)
 {
+	trace_cxl_detach(ctx);
+
 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
 		return detach_process_native_dedicated(ctx);
 
@@ -667,6 +689,7 @@ static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
 
 int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
 {
+	trace_cxl_psl_irq_ack(ctx, tfc);
 	if (tfc)
 		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
 	if (psl_reset_mask)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 0f2cc9f8b4db..1ef01647265f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -21,6 +21,7 @@
 #include <asm/msi_bitmap.h>
 #include <asm/pci-bridge.h> /* for struct pci_controller */
 #include <asm/pnv-pci.h>
+#include <asm/io.h>
 
 #include "cxl.h"
 
@@ -113,6 +114,24 @@
 #define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
 #define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
 
+u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
+{
+	u64 aligned_off = off & ~0x3L;
+	u32 val;
+
+	val = cxl_afu_cr_read32(afu, cr, aligned_off);
+	return (val >> ((off & 0x2) * 8)) & 0xffff;
+}
+
+u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
+{
+	u64 aligned_off = off & ~0x3L;
+	u32 val;
+
+	val = cxl_afu_cr_read32(afu, cr, aligned_off);
+	return (val >> ((off & 0x3) * 8)) & 0xff;
+}
+
 static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
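
The sub-word helpers above always perform an aligned 32-bit read and then select the requested lane: for cxl_afu_cr_read16(afu, cr, 0x6), aligned_off is 0x4 and the shift is (0x6 & 0x2) * 8 = 16, i.e. the upper half-word of that little-endian dword. A standalone sketch of the same lane extraction, in plain user-space C for illustration only:

	#include <stdint.h>

	/* Mirrors the shift/mask used above, but on an already-fetched
	 * little-endian dword instead of an MMIO read. */
	static uint16_t cr_extract16(uint32_t dword, uint64_t off)
	{
		return (dword >> ((off & 0x2) * 8)) & 0xffff;
	}

	static uint8_t cr_extract8(uint32_t dword, uint64_t off)
	{
		return (dword >> ((off & 0x3) * 8)) & 0xff;
	}
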
@@ -316,7 +335,7 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
 	u64 psl_dsnctl;
 	u64 chipid;
 
-	if (!(np = pnv_pci_to_phb_node(dev)))
+	if (!(np = pnv_pci_get_phb_node(dev)))
 		return -ENODEV;
 
 	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
@@ -348,7 +367,7 @@ static int init_implementation_afu_regs(struct cxl_afu *afu)
 	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
 	/* for debugging with trace arrays */
 	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
-	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, 0xF000000000000000ULL);
+	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);
 
 	return 0;
 }
@@ -361,6 +380,41 @@ int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
 	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
 }
 
+int cxl_update_image_control(struct cxl *adapter)
+{
+	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+	int rc;
+	int vsec;
+	u8 image_state;
+
+	if (!(vsec = find_cxl_vsec(dev))) {
+		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
+		return -ENODEV;
+	}
+
+	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
+		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
+		return rc;
+	}
+
+	if (adapter->perst_loads_image)
+		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
+	else
+		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
+
+	if (adapter->perst_select_user)
+		image_state |= CXL_VSEC_PERST_SELECT_USER;
+	else
+		image_state &= ~CXL_VSEC_PERST_SELECT_USER;
+
+	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
+		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
 int cxl_alloc_one_irq(struct cxl *adapter)
 {
 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
@@ -520,6 +574,7 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
 	val = AFUD_READ_INFO(afu);
 	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
 	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
+	afu->crs_num = AFUD_NUM_CRS(val);
 
 	if (AFUD_AFU_DIRECTED(val))
 		afu->modes_supported |= CXL_MODE_DIRECTED;
@@ -534,11 +589,17 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
 	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
 		afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);
 
+	val = AFUD_READ_CR(afu);
+	afu->crs_len = AFUD_CR_LEN(val) * 256;
+	afu->crs_offset = AFUD_READ_CR_OFF(afu);
+
 	return 0;
 }
 
 static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
 {
+	int i;
+
 	if (afu->psa && afu->adapter->ps_size <
 	    (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
 		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
@@ -548,6 +609,13 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
 	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
 		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");
 
+	for (i = 0; i < afu->crs_num; i++) {
+		if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
+			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
+			return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
@@ -706,6 +774,42 @@ static void cxl_remove_afu(struct cxl_afu *afu)
 	device_unregister(&afu->dev);
 }
 
+int cxl_reset(struct cxl *adapter)
+{
+	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+	int rc;
+	int i;
+	u32 val;
+
+	dev_info(&dev->dev, "CXL reset\n");
+
+	for (i = 0; i < adapter->slices; i++)
+		cxl_remove_afu(adapter->afu[i]);
+
+	/* pcie_warm_reset requests a fundamental pci reset which includes a
+	 * PERST assert/deassert.  PERST triggers a loading of the image
+	 * if "user" or "factory" is selected in sysfs */
+	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
+		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
+		return rc;
+	}
+
+	/* the PERST done above fences the PHB. So, reset depends on EEH
+	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
+	 * the driver.  Do an mmio read explicitly to ensure EEH notices the
+	 * fenced PHB.  Retry for a few seconds before giving up. */
+	i = 0;
+	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
+		(i < 5)) {
+		msleep(500);
+		i++;
+	}
+
+	if (val != 0xffffffff)
+		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
+
+	return rc;
+}
 
 static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
 {
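
cxl_reset() is exposed to user space through the new adapter-level "reset" sysfs attribute added in sysfs.c further down: writing 1 removes the AFUs, asserts PERST via pcie_warm_reset and relies on EEH to rebind the driver. A minimal user-space sketch, assuming the adapter appears as /sys/class/cxl/card0 (the usual cxl sysfs layout, not something this diff establishes):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* reset_adapter_store() only accepts the value 1 */
		int fd = open("/sys/class/cxl/card0/reset", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
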
@@ -770,8 +874,8 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
 	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
 	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
 	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
-	adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
-	adapter->perst_select_user = !!(image_state & CXL_VSEC_PERST_SELECT_USER);
+	adapter->perst_loads_image = true;
+	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
 
 	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
 	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
@@ -879,6 +983,9 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
 	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
 		goto err2;
 
+	if ((rc = cxl_update_image_control(adapter)))
+		goto err2;
+
 	if ((rc = cxl_map_adapter_regs(adapter, dev)))
 		goto err2;
 
@@ -888,9 +995,15 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
 	if ((rc = init_implementation_adapter_regs(adapter, dev)))
 		goto err3;
 
-	if ((rc = pnv_phb_to_cxl(dev)))
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
 		goto err3;
 
+	/* If recovery happened, the last step is to turn on snooping.
+	 * In the non-recovery case this has no effect */
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) {
+		goto err3;
+	}
+
 	if ((rc = cxl_register_psl_err_irq(adapter)))
 		goto err3;
 
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 461bdbd5d483..d0c38c7bc0c4 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
+#include <linux/pci_regs.h>
 
 #include "cxl.h"
 
@@ -56,11 +57,68 @@ static ssize_t image_loaded_show(struct device *device,
 	return scnprintf(buf, PAGE_SIZE, "factory\n");
 }
 
+static ssize_t reset_adapter_store(struct device *device,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+	int rc;
+	int val;
+
+	rc = sscanf(buf, "%i", &val);
+	if ((rc != 1) || (val != 1))
+		return -EINVAL;
+
+	if ((rc = cxl_reset(adapter)))
+		return rc;
+	return count;
+}
+
+static ssize_t load_image_on_perst_show(struct device *device,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+
+	if (!adapter->perst_loads_image)
+		return scnprintf(buf, PAGE_SIZE, "none\n");
+
+	if (adapter->perst_select_user)
+		return scnprintf(buf, PAGE_SIZE, "user\n");
+	return scnprintf(buf, PAGE_SIZE, "factory\n");
+}
+
+static ssize_t load_image_on_perst_store(struct device *device,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+	int rc;
+
+	if (!strncmp(buf, "none", 4))
+		adapter->perst_loads_image = false;
+	else if (!strncmp(buf, "user", 4)) {
+		adapter->perst_select_user = true;
+		adapter->perst_loads_image = true;
+	} else if (!strncmp(buf, "factory", 7)) {
+		adapter->perst_select_user = false;
+		adapter->perst_loads_image = true;
+	} else
+		return -EINVAL;
+
+	if ((rc = cxl_update_image_control(adapter)))
+		return rc;
+
+	return count;
+}
+
 static struct device_attribute adapter_attrs[] = {
 	__ATTR_RO(caia_version),
 	__ATTR_RO(psl_revision),
 	__ATTR_RO(base_image),
 	__ATTR_RO(image_loaded),
+	__ATTR_RW(load_image_on_perst),
+	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
 };
 
 
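
load_image_on_perst_store() above accepts exactly the strings "none", "user" and "factory", and pushes the choice to the card via cxl_update_image_control(). For illustration, a user-space sketch that selects the user image ahead of a reset (the /sys/class/cxl/card0 path is assumed, as above):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_sysfs(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* accepted values: "none", "user", "factory" */
		if (write_sysfs("/sys/class/cxl/card0/load_image_on_perst", "user"))
			perror("load_image_on_perst");
		return 0;
	}
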
@@ -310,8 +368,6 @@ static struct device_attribute afu_attrs[] = {
 	__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
 };
 
-
-
 int cxl_sysfs_adapter_add(struct cxl *adapter)
 {
 	int i, rc;
@@ -334,31 +390,191 @@ void cxl_sysfs_adapter_remove(struct cxl *adapter)
 		device_remove_file(&adapter->dev, &adapter_attrs[i]);
 }
 
+struct afu_config_record {
+	struct kobject kobj;
+	struct bin_attribute config_attr;
+	struct list_head list;
+	int cr;
+	u16 device;
+	u16 vendor;
+	u32 class;
+};
+
+#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
+
+static ssize_t vendor_show(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct afu_config_record *cr = to_cr(kobj);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
+}
+
+static ssize_t device_show(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	struct afu_config_record *cr = to_cr(kobj);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
+}
+
+static ssize_t class_show(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	struct afu_config_record *cr = to_cr(kobj);
+
+	return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
+}
+
+static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
+			       struct bin_attribute *bin_attr, char *buf,
+			       loff_t off, size_t count)
+{
+	struct afu_config_record *cr = to_cr(kobj);
+	struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
+
+	u64 i, j, val, size = afu->crs_len;
+
+	if (off > size)
+		return 0;
+	if (off + count > size)
+		count = size - off;
+
+	for (i = 0; i < count;) {
+		val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
+		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
+			buf[i] = (val >> (j * 8)) & 0xff;
+	}
+
+	return count;
+}
+
+static struct kobj_attribute vendor_attribute =
+	__ATTR_RO(vendor);
+static struct kobj_attribute device_attribute =
+	__ATTR_RO(device);
+static struct kobj_attribute class_attribute =
+	__ATTR_RO(class);
+
+static struct attribute *afu_cr_attrs[] = {
+	&vendor_attribute.attr,
+	&device_attribute.attr,
+	&class_attribute.attr,
+	NULL,
+};
+
+static void release_afu_config_record(struct kobject *kobj)
+{
+	struct afu_config_record *cr = to_cr(kobj);
+
+	kfree(cr);
+}
+
+static struct kobj_type afu_config_record_type = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.release = release_afu_config_record,
+	.default_attrs = afu_cr_attrs,
+};
+
+static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
+{
+	struct afu_config_record *cr;
+	int rc;
+
+	cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
+	if (!cr)
+		return ERR_PTR(-ENOMEM);
+
+	cr->cr = cr_idx;
+	cr->device = cxl_afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID);
+	cr->vendor = cxl_afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID);
+	cr->class = cxl_afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION) >> 8;
+
+	/*
+	 * Export raw AFU PCIe like config record. For now this is read only by
+	 * root - we can expand that later to be readable by non-root and maybe
+	 * even writable provided we have a good use-case. Once we support
+	 * exposing AFUs through a virtual PHB they will get that for free from
+	 * Linux' PCI infrastructure, but until then it's not clear that we
+	 * need it for anything since the main use case is just identifying
+	 * AFUs, which can be done via the vendor, device and class attributes.
+	 */
+	sysfs_bin_attr_init(&cr->config_attr);
+	cr->config_attr.attr.name = "config";
+	cr->config_attr.attr.mode = S_IRUSR;
+	cr->config_attr.size = afu->crs_len;
+	cr->config_attr.read = afu_read_config;
+
+	rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
+				  &afu->dev.kobj, "cr%i", cr->cr);
+	if (rc)
+		goto err;
+
+	rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
+	if (rc)
+		goto err1;
+
+	rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
+	if (rc)
+		goto err2;
+
+	return cr;
+err2:
+	sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
+err1:
+	kobject_put(&cr->kobj);
+	return ERR_PTR(rc);
+err:
+	kfree(cr);
+	return ERR_PTR(rc);
+}
+
+void cxl_sysfs_afu_remove(struct cxl_afu *afu)
+{
+	struct afu_config_record *cr, *tmp;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
+		device_remove_file(&afu->dev, &afu_attrs[i]);
+
+	list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
+		sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
+		kobject_put(&cr->kobj);
+	}
+}
+
 int cxl_sysfs_afu_add(struct cxl_afu *afu)
 {
+	struct afu_config_record *cr;
 	int i, rc;
 
+	INIT_LIST_HEAD(&afu->crs);
+
 	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
 		if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
 			goto err;
 	}
 
+	for (i = 0; i < afu->crs_num; i++) {
+		cr = cxl_sysfs_afu_new_cr(afu, i);
+		if (IS_ERR(cr)) {
+			rc = PTR_ERR(cr);
+			goto err1;
+		}
+		list_add(&cr->list, &afu->crs);
+	}
+
 	return 0;
 
+err1:
+	cxl_sysfs_afu_remove(afu);
+	return rc;
 err:
 	for (i--; i >= 0; i--)
 		device_remove_file(&afu->dev, &afu_attrs[i]);
 	return rc;
 }
 
-void cxl_sysfs_afu_remove(struct cxl_afu *afu)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
-		device_remove_file(&afu->dev, &afu_attrs[i]);
-}
-
 int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
 {
 	int i, rc;
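
Each AFU configuration record now shows up as a crN kobject under the AFU device, with vendor, device and class attributes plus a root-only raw config blob. For illustration, a user-space sketch that identifies an AFU from these files (the /sys/class/cxl/afu0.0 path follows the usual cxl sysfs layout and is an assumption, not part of this diff):

	#include <stdio.h>

	static int read_hex(const char *path, unsigned int *val)
	{
		FILE *f = fopen(path, "r");
		int rc;

		if (!f)
			return -1;
		rc = (fscanf(f, "%x", val) == 1) ? 0 : -1;
		fclose(f);
		return rc;
	}

	int main(void)
	{
		unsigned int vendor, device, class;

		if (read_hex("/sys/class/cxl/afu0.0/cr0/vendor", &vendor) ||
		    read_hex("/sys/class/cxl/afu0.0/cr0/device", &device) ||
		    read_hex("/sys/class/cxl/afu0.0/cr0/class", &class))
			return 1;

		printf("AFU cr0: vendor=0x%04x device=0x%04x class=0x%06x\n",
		       vendor, device, class);
		return 0;
	}
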
diff --git a/drivers/misc/cxl/trace.c b/drivers/misc/cxl/trace.c
new file mode 100644
index 000000000000..c2b06d319e6e
--- /dev/null
+++ b/drivers/misc/cxl/trace.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#endif
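
trace.c exists only so that CREATE_TRACE_POINTS is defined in exactly one compilation unit before trace.h is included (the __CHECKER__ guard keeps sparse away from the generated code); every other cxl file simply includes "trace.h" and calls the trace_* hooks, as the fault.c, irq.c, main.c, native.c and file.c hunks above do. A minimal sketch of such a call site, assuming a context pointer is in scope (the function name is hypothetical):

	#include "trace.h"

	/* Illustration only: emitting one of the new events from another cxl
	 * compilation unit.  The call compiles down to a static-branch no-op
	 * unless the cxl:cxl_slbia tracepoint is enabled at run time. */
	static void example_invalidate(struct cxl_context *ctx)
	{
		trace_cxl_slbia(ctx);
	}
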
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
new file mode 100644
index 000000000000..ae434d87887e
--- /dev/null
+++ b/drivers/misc/cxl/trace.h
@@ -0,0 +1,459 @@
1/*
2 * Copyright 2015 IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM cxl
12
13#if !defined(_CXL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
14#define _CXL_TRACE_H
15
16#include <linux/tracepoint.h>
17
18#include "cxl.h"
19
20#define DSISR_FLAGS \
21 { CXL_PSL_DSISR_An_DS, "DS" }, \
22 { CXL_PSL_DSISR_An_DM, "DM" }, \
23 { CXL_PSL_DSISR_An_ST, "ST" }, \
24 { CXL_PSL_DSISR_An_UR, "UR" }, \
25 { CXL_PSL_DSISR_An_PE, "PE" }, \
26 { CXL_PSL_DSISR_An_AE, "AE" }, \
27 { CXL_PSL_DSISR_An_OC, "OC" }, \
28 { CXL_PSL_DSISR_An_M, "M" }, \
29 { CXL_PSL_DSISR_An_P, "P" }, \
30 { CXL_PSL_DSISR_An_A, "A" }, \
31 { CXL_PSL_DSISR_An_S, "S" }, \
32 { CXL_PSL_DSISR_An_K, "K" }
33
34#define TFC_FLAGS \
35 { CXL_PSL_TFC_An_A, "A" }, \
36 { CXL_PSL_TFC_An_C, "C" }, \
37 { CXL_PSL_TFC_An_AE, "AE" }, \
38 { CXL_PSL_TFC_An_R, "R" }
39
40#define LLCMD_NAMES \
41 { CXL_SPA_SW_CMD_TERMINATE, "TERMINATE" }, \
42 { CXL_SPA_SW_CMD_REMOVE, "REMOVE" }, \
43 { CXL_SPA_SW_CMD_SUSPEND, "SUSPEND" }, \
44 { CXL_SPA_SW_CMD_RESUME, "RESUME" }, \
45 { CXL_SPA_SW_CMD_ADD, "ADD" }, \
46 { CXL_SPA_SW_CMD_UPDATE, "UPDATE" }
47
48#define AFU_COMMANDS \
49 { 0, "DISABLE" }, \
50 { CXL_AFU_Cntl_An_E, "ENABLE" }, \
51 { CXL_AFU_Cntl_An_RA, "RESET" }
52
53#define PSL_COMMANDS \
54 { CXL_PSL_SCNTL_An_Pc, "PURGE" }, \
55 { CXL_PSL_SCNTL_An_Sc, "SUSPEND" }
56
57
58DECLARE_EVENT_CLASS(cxl_pe_class,
59 TP_PROTO(struct cxl_context *ctx),
60
61 TP_ARGS(ctx),
62
63 TP_STRUCT__entry(
64 __field(u8, card)
65 __field(u8, afu)
66 __field(u16, pe)
67 ),
68
69 TP_fast_assign(
70 __entry->card = ctx->afu->adapter->adapter_num;
71 __entry->afu = ctx->afu->slice;
72 __entry->pe = ctx->pe;
73 ),
74
75 TP_printk("afu%i.%i pe=%i",
76 __entry->card,
77 __entry->afu,
78 __entry->pe
79 )
80);
81
82
83TRACE_EVENT(cxl_attach,
84 TP_PROTO(struct cxl_context *ctx, u64 wed, s16 num_interrupts, u64 amr),
85
86 TP_ARGS(ctx, wed, num_interrupts, amr),
87
88 TP_STRUCT__entry(
89 __field(u8, card)
90 __field(u8, afu)
91 __field(u16, pe)
92 __field(pid_t, pid)
93 __field(u64, wed)
94 __field(u64, amr)
95 __field(s16, num_interrupts)
96 ),
97
98 TP_fast_assign(
99 __entry->card = ctx->afu->adapter->adapter_num;
100 __entry->afu = ctx->afu->slice;
101 __entry->pe = ctx->pe;
102 __entry->pid = pid_nr(ctx->pid);
103 __entry->wed = wed;
104 __entry->amr = amr;
105 __entry->num_interrupts = num_interrupts;
106 ),
107
108 TP_printk("afu%i.%i pid=%i pe=%i wed=0x%.16llx irqs=%i amr=0x%llx",
109 __entry->card,
110 __entry->afu,
111 __entry->pid,
112 __entry->pe,
113 __entry->wed,
114 __entry->num_interrupts,
115 __entry->amr
116 )
117);
118
119DEFINE_EVENT(cxl_pe_class, cxl_detach,
120 TP_PROTO(struct cxl_context *ctx),
121 TP_ARGS(ctx)
122);
123
124TRACE_EVENT(cxl_afu_irq,
125 TP_PROTO(struct cxl_context *ctx, int afu_irq, int virq, irq_hw_number_t hwirq),
126
127 TP_ARGS(ctx, afu_irq, virq, hwirq),
128
129 TP_STRUCT__entry(
130 __field(u8, card)
131 __field(u8, afu)
132 __field(u16, pe)
133 __field(u16, afu_irq)
134 __field(int, virq)
135 __field(irq_hw_number_t, hwirq)
136 ),
137
138 TP_fast_assign(
139 __entry->card = ctx->afu->adapter->adapter_num;
140 __entry->afu = ctx->afu->slice;
141 __entry->pe = ctx->pe;
142 __entry->afu_irq = afu_irq;
143 __entry->virq = virq;
144 __entry->hwirq = hwirq;
145 ),
146
147 TP_printk("afu%i.%i pe=%i afu_irq=%i virq=%i hwirq=0x%lx",
148 __entry->card,
149 __entry->afu,
150 __entry->pe,
151 __entry->afu_irq,
152 __entry->virq,
153 __entry->hwirq
154 )
155);
156
157TRACE_EVENT(cxl_psl_irq,
158 TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
159
160 TP_ARGS(ctx, irq, dsisr, dar),
161
162 TP_STRUCT__entry(
163 __field(u8, card)
164 __field(u8, afu)
165 __field(u16, pe)
166 __field(int, irq)
167 __field(u64, dsisr)
168 __field(u64, dar)
169 ),
170
171 TP_fast_assign(
172 __entry->card = ctx->afu->adapter->adapter_num;
173 __entry->afu = ctx->afu->slice;
174 __entry->pe = ctx->pe;
175 __entry->irq = irq;
176 __entry->dsisr = dsisr;
177 __entry->dar = dar;
178 ),
179
180 TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%.16llx",
181 __entry->card,
182 __entry->afu,
183 __entry->pe,
184 __entry->irq,
185 __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
186 __entry->dar
187 )
188);
189
190TRACE_EVENT(cxl_psl_irq_ack,
191 TP_PROTO(struct cxl_context *ctx, u64 tfc),
192
193 TP_ARGS(ctx, tfc),
194
195 TP_STRUCT__entry(
196 __field(u8, card)
197 __field(u8, afu)
198 __field(u16, pe)
199 __field(u64, tfc)
200 ),
201
202 TP_fast_assign(
203 __entry->card = ctx->afu->adapter->adapter_num;
204 __entry->afu = ctx->afu->slice;
205 __entry->pe = ctx->pe;
206 __entry->tfc = tfc;
207 ),
208
209 TP_printk("afu%i.%i pe=%i tfc=%s",
210 __entry->card,
211 __entry->afu,
212 __entry->pe,
213 __print_flags(__entry->tfc, "|", TFC_FLAGS)
214 )
215);
216
217TRACE_EVENT(cxl_ste_miss,
218 TP_PROTO(struct cxl_context *ctx, u64 dar),
219
220 TP_ARGS(ctx, dar),
221
222 TP_STRUCT__entry(
223 __field(u8, card)
224 __field(u8, afu)
225 __field(u16, pe)
226 __field(u64, dar)
227 ),
228
229 TP_fast_assign(
230 __entry->card = ctx->afu->adapter->adapter_num;
231 __entry->afu = ctx->afu->slice;
232 __entry->pe = ctx->pe;
233 __entry->dar = dar;
234 ),
235
236 TP_printk("afu%i.%i pe=%i dar=0x%.16llx",
237 __entry->card,
238 __entry->afu,
239 __entry->pe,
240 __entry->dar
241 )
242);
243
244TRACE_EVENT(cxl_ste_write,
245 TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v),
246
247 TP_ARGS(ctx, idx, e, v),
248
249 TP_STRUCT__entry(
250 __field(u8, card)
251 __field(u8, afu)
252 __field(u16, pe)
253 __field(unsigned int, idx)
254 __field(u64, e)
255 __field(u64, v)
256 ),
257
258 TP_fast_assign(
259 __entry->card = ctx->afu->adapter->adapter_num;
260 __entry->afu = ctx->afu->slice;
261 __entry->pe = ctx->pe;
262 __entry->idx = idx;
263 __entry->e = e;
264 __entry->v = v;
265 ),
266
267 TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%.16llx V=0x%.16llx",
268 __entry->card,
269 __entry->afu,
270 __entry->pe,
271 __entry->idx,
272 __entry->e,
273 __entry->v
274 )
275);
276
277TRACE_EVENT(cxl_pte_miss,
278 TP_PROTO(struct cxl_context *ctx, u64 dsisr, u64 dar),
279
280 TP_ARGS(ctx, dsisr, dar),
281
282 TP_STRUCT__entry(
283 __field(u8, card)
284 __field(u8, afu)
285 __field(u16, pe)
286 __field(u64, dsisr)
287 __field(u64, dar)
288 ),
289
290 TP_fast_assign(
291 __entry->card = ctx->afu->adapter->adapter_num;
292 __entry->afu = ctx->afu->slice;
293 __entry->pe = ctx->pe;
294 __entry->dsisr = dsisr;
295 __entry->dar = dar;
296 ),
297
298 TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%.16llx",
299 __entry->card,
300 __entry->afu,
301 __entry->pe,
302 __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
303 __entry->dar
304 )
305);
306
307TRACE_EVENT(cxl_llcmd,
308 TP_PROTO(struct cxl_context *ctx, u64 cmd),
309
310 TP_ARGS(ctx, cmd),
311
312 TP_STRUCT__entry(
313 __field(u8, card)
314 __field(u8, afu)
315 __field(u16, pe)
316 __field(u64, cmd)
317 ),
318
319 TP_fast_assign(
320 __entry->card = ctx->afu->adapter->adapter_num;
321 __entry->afu = ctx->afu->slice;
322 __entry->pe = ctx->pe;
323 __entry->cmd = cmd;
324 ),
325
326 TP_printk("afu%i.%i pe=%i cmd=%s",
327 __entry->card,
328 __entry->afu,
329 __entry->pe,
330 __print_symbolic_u64(__entry->cmd, LLCMD_NAMES)
331 )
332);
333
334TRACE_EVENT(cxl_llcmd_done,
335 TP_PROTO(struct cxl_context *ctx, u64 cmd, int rc),
336
337 TP_ARGS(ctx, cmd, rc),
338
339 TP_STRUCT__entry(
340 __field(u8, card)
341 __field(u8, afu)
342 __field(u16, pe)
343 __field(u64, cmd)
344 __field(int, rc)
345 ),
346
347 TP_fast_assign(
348 __entry->card = ctx->afu->adapter->adapter_num;
349 __entry->afu = ctx->afu->slice;
350 __entry->pe = ctx->pe;
351 __entry->rc = rc;
352 __entry->cmd = cmd;
353 ),
354
355 TP_printk("afu%i.%i pe=%i cmd=%s rc=%i",
356 __entry->card,
357 __entry->afu,
358 __entry->pe,
359 __print_symbolic_u64(__entry->cmd, LLCMD_NAMES),
360 __entry->rc
361 )
362);
363
364DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl,
365 TP_PROTO(struct cxl_afu *afu, u64 cmd),
366
367 TP_ARGS(afu, cmd),
368
369 TP_STRUCT__entry(
370 __field(u8, card)
371 __field(u8, afu)
372 __field(u64, cmd)
373 ),
374
375 TP_fast_assign(
376 __entry->card = afu->adapter->adapter_num;
377 __entry->afu = afu->slice;
378 __entry->cmd = cmd;
379 ),
380
381 TP_printk("afu%i.%i cmd=%s",
382 __entry->card,
383 __entry->afu,
384 __print_symbolic_u64(__entry->cmd, AFU_COMMANDS)
385 )
386);
387
388DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl_done,
389 TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
390
391 TP_ARGS(afu, cmd, rc),
392
393 TP_STRUCT__entry(
394 __field(u8, card)
395 __field(u8, afu)
396 __field(u64, cmd)
397 __field(int, rc)
398 ),
399
400 TP_fast_assign(
401 __entry->card = afu->adapter->adapter_num;
402 __entry->afu = afu->slice;
403 __entry->rc = rc;
404 __entry->cmd = cmd;
405 ),
406
407 TP_printk("afu%i.%i cmd=%s rc=%i",
408 __entry->card,
409 __entry->afu,
410 __print_symbolic_u64(__entry->cmd, AFU_COMMANDS),
411 __entry->rc
412 )
413);
414
415DEFINE_EVENT(cxl_afu_psl_ctrl, cxl_afu_ctrl,
416 TP_PROTO(struct cxl_afu *afu, u64 cmd),
417 TP_ARGS(afu, cmd)
418);
419
420DEFINE_EVENT(cxl_afu_psl_ctrl_done, cxl_afu_ctrl_done,
421 TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
422 TP_ARGS(afu, cmd, rc)
423);
424
425DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl, cxl_psl_ctrl,
426 TP_PROTO(struct cxl_afu *afu, u64 cmd),
427 TP_ARGS(afu, cmd),
428
429 TP_printk("psl%i.%i cmd=%s",
430 __entry->card,
431 __entry->afu,
432 __print_symbolic_u64(__entry->cmd, PSL_COMMANDS)
433 )
434);
435
436DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl_done, cxl_psl_ctrl_done,
437 TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
438 TP_ARGS(afu, cmd, rc),
439
440 TP_printk("psl%i.%i cmd=%s rc=%i",
441 __entry->card,
442 __entry->afu,
443 __print_symbolic_u64(__entry->cmd, PSL_COMMANDS),
444 __entry->rc
445 )
446);
447
448DEFINE_EVENT(cxl_pe_class, cxl_slbia,
449 TP_PROTO(struct cxl_context *ctx),
450 TP_ARGS(ctx)
451);
452
453#endif /* _CXL_TRACE_H */
454
455/* This part must be outside protection */
456#undef TRACE_INCLUDE_PATH
457#define TRACE_INCLUDE_PATH .
458#define TRACE_INCLUDE_FILE trace
459#include <trace/define_trace.h>
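
Because per-context events share DECLARE_EVENT_CLASS(cxl_pe_class), adding another one only costs a DEFINE_EVENT stanza plus a call site. A hypothetical example reusing the class, shown only to illustrate how it is meant to be extended (it is not added by this patch):

	/* Hypothetical event reusing cxl_pe_class; not part of this patch. */
	DEFINE_EVENT(cxl_pe_class, cxl_context_start,
		TP_PROTO(struct cxl_context *ctx),
		TP_ARGS(ctx)
	);
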