author		Christophe Lombard <clombard@linux.vnet.ibm.com>	2016-03-04 06:26:35 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-03-08 21:05:54 -0500
commit		cbffa3a5146a90f46806cef3a98b8be5833727e8 (patch)
tree		888ecaf9a7b9640b62dc98cbf1a1844954c7983a
parent		444c4ba4616503baf68cffbf6748047d308b8cd2 (diff)
cxl: Separate bare-metal fields in adapter and AFU data structures
Introduce sub-structures containing the bare-metal specific fields in
the structures describing the adapter (struct cxl) and AFU (struct
cxl_afu). Update all their references.

Co-authored-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Signed-off-by: Christophe Lombard <clombard@linux.vnet.ibm.com>
Reviewed-by: Manoj Kumar <manoj@linux.vnet.ibm.com>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
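In short: fields that only exist on bare metal move behind a pointer that
only the PowerNV/PCI probe path allocates, while common fields stay in the
shared structures. A minimal sketch of the resulting pattern, condensed from
the hunks below (my_afu_init is a hypothetical caller, not a function from
this patch):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Bare-metal-only state, allocated only by the PCI (PowerNV) path. */
struct cxl_afu_native {
	struct mutex spa_mutex;
	u64 pp_offset;
	/* ... */
};

struct cxl_afu {
	struct cxl_afu_native *native;	/* NULL when running as a guest */
	struct cxl_afu_guest *guest;	/* NULL on bare metal */
	u64 pp_size;			/* common fields stay here */
	/* ... */
};

/* Hypothetical caller showing the allocation the PCI path now does. */
static int my_afu_init(struct cxl_afu *afu)
{
	afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
	if (!afu->native)
		return -ENOMEM;
	mutex_init(&afu->native->spa_mutex);
	return 0;
}

The indirection also fails loudly: guest-mode code that wrongly touches
bare-metal state oopses on a NULL native pointer instead of silently reading
fields that were never initialised.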
-rw-r--r--	drivers/misc/cxl/context.c	2
-rw-r--r--	drivers/misc/cxl/cxl.h	84
-rw-r--r--	drivers/misc/cxl/irq.c	2
-rw-r--r--	drivers/misc/cxl/main.c	1
-rw-r--r--	drivers/misc/cxl/native.c	85
-rw-r--r--	drivers/misc/cxl/pci.c	95
-rw-r--r--	drivers/misc/cxl/sysfs.c	2
-rw-r--r--	drivers/misc/cxl/vphb.c	4
8 files changed, 164 insertions, 111 deletions
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 46f98441d7a1..200837f7612b 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -96,7 +96,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 
 	ctx->pe = i;
 	if (cpu_has_feature(CPU_FTR_HVMODE))
-		ctx->elem = &ctx->afu->spa[i];
+		ctx->elem = &ctx->afu->native->spa[i];
 	ctx->pe_inserted = false;
 
 	/*
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index ac655a698e41..3a1fabd41072 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -344,18 +344,44 @@ struct cxl_sste {
 #define to_cxl_adapter(d) container_of(d, struct cxl, dev)
 #define to_cxl_afu(d) container_of(d, struct cxl_afu, dev)
 
-struct cxl_afu {
+struct cxl_afu_native {
+	void __iomem *p1n_mmio;
+	void __iomem *afu_desc_mmio;
 	irq_hw_number_t psl_hwirq;
+	unsigned int psl_virq;
+	struct mutex spa_mutex;
+	/*
+	 * Only the first part of the SPA is used for the process element
+	 * linked list. The only other part that software needs to worry about
+	 * is sw_command_status, which we store a separate pointer to.
+	 * Everything else in the SPA is only used by hardware
+	 */
+	struct cxl_process_element *spa;
+	__be64 *sw_command_status;
+	unsigned int spa_size;
+	int spa_order;
+	int spa_max_procs;
+	u64 pp_offset;
+};
+
+struct cxl_afu_guest {
+	u64 handle;
+	phys_addr_t p2n_phys;
+	u64 p2n_size;
+	int max_ints;
+};
+
+struct cxl_afu {
+	struct cxl_afu_native *native;
+	struct cxl_afu_guest *guest;
 	irq_hw_number_t serr_hwirq;
-	char *err_irq_name;
-	char *psl_irq_name;
 	unsigned int serr_virq;
-	void __iomem *p1n_mmio;
+	char *psl_irq_name;
+	char *err_irq_name;
 	void __iomem *p2n_mmio;
 	phys_addr_t psn_phys;
-	u64 pp_offset;
 	u64 pp_size;
-	void __iomem *afu_desc_mmio;
+
 	struct cxl *adapter;
 	struct device dev;
 	struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
@@ -363,26 +389,12 @@ struct cxl_afu {
 	struct idr contexts_idr;
 	struct dentry *debugfs;
 	struct mutex contexts_lock;
-	struct mutex spa_mutex;
 	spinlock_t afu_cntl_lock;
 
 	/* AFU error buffer fields and bin attribute for sysfs */
 	u64 eb_len, eb_offset;
 	struct bin_attribute attr_eb;
 
-	/*
-	 * Only the first part of the SPA is used for the process element
-	 * linked list. The only other part that software needs to worry about
-	 * is sw_command_status, which we store a separate pointer to.
-	 * Everything else in the SPA is only used by hardware
-	 */
-	struct cxl_process_element *spa;
-	__be64 *sw_command_status;
-	unsigned int spa_size;
-	int spa_order;
-	int spa_max_procs;
-	unsigned int psl_virq;
-
 	/* pointer to the vphb */
 	struct pci_controller *phb;
 
@@ -488,11 +500,34 @@ struct cxl_context {
 	struct rcu_head rcu;
 };
 
-struct cxl {
+struct cxl_native {
+	u64 afu_desc_off;
+	u64 afu_desc_size;
 	void __iomem *p1_mmio;
 	void __iomem *p2_mmio;
 	irq_hw_number_t err_hwirq;
 	unsigned int err_virq;
+	u64 ps_off;
+};
+
+struct cxl_guest {
+	struct platform_device *pdev;
+	int irq_nranges;
+	struct cdev cdev;
+	irq_hw_number_t irq_base_offset;
+	struct irq_avail *irq_avail;
+	spinlock_t irq_alloc_lock;
+	u64 handle;
+	char *status;
+	u16 vendor;
+	u16 device;
+	u16 subsystem_vendor;
+	u16 subsystem;
+};
+
+struct cxl {
+	struct cxl_native *native;
+	struct cxl_guest *guest;
 	spinlock_t afu_list_lock;
 	struct cxl_afu *afu[CXL_MAX_SLICES];
 	struct device dev;
@@ -503,9 +538,6 @@ struct cxl {
 	struct bin_attribute cxl_attr;
 	int adapter_num;
 	int user_irqs;
-	u64 afu_desc_off;
-	u64 afu_desc_size;
-	u64 ps_off;
 	u64 ps_size;
 	u16 psl_rev;
 	u16 base_image;
@@ -570,7 +602,7 @@ static inline bool cxl_adapter_link_ok(struct cxl *cxl)
 static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
 {
 	WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
-	return cxl->p1_mmio + cxl_reg_off(reg);
+	return cxl->native->p1_mmio + cxl_reg_off(reg);
 }
 
 static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
@@ -590,7 +622,7 @@ static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
 static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
 {
 	WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
-	return afu->p1n_mmio + cxl_reg_off(reg);
+	return afu->native->p1n_mmio + cxl_reg_off(reg);
 }
 
 static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
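The P1 accessors above now resolve through the native sub-structure, which
only exists on bare metal; the existing WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE))
documents that guest code must never reach them. A usage sketch
(read_err_ivte is a hypothetical helper; cxl_p1_read and CXL_PSL_ErrIVTE are
taken from cxl.h):

/* Hypothetical helper: reading a P1 register via the reworked accessors. */
static inline u64 read_err_ivte(struct cxl *adapter)
{
	/*
	 * cxl_p1_read() -> _cxl_p1_addr() now dereferences
	 * adapter->native->p1_mmio, so this is only valid on bare metal,
	 * where adapter->native has been allocated and the region mapped.
	 */
	return cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
}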
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 3c04c14d1c60..be646dc41a2c 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -270,7 +270,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
 
 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		/* Multiplexed PSL Interrupt */
-		ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
+		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
 		ctx->irqs.range[0] = 1;
 	}
 
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 814257b44534..927ba5a954f6 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -261,7 +261,6 @@ struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
 	idr_init(&afu->contexts_idr);
 	mutex_init(&afu->contexts_lock);
 	spin_lock_init(&afu->afu_cntl_lock);
-	mutex_init(&afu->spa_mutex);
 
 	afu->prefault_mode = CXL_PREFAULT_NONE;
 	afu->irqs_max = afu->adapter->user_irqs;
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 7d526294112d..c0bca59a12aa 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -186,22 +186,22 @@ static int spa_max_procs(int spa_size)
 int cxl_alloc_spa(struct cxl_afu *afu)
 {
 	/* Work out how many pages to allocate */
-	afu->spa_order = 0;
+	afu->native->spa_order = 0;
 	do {
-		afu->spa_order++;
-		afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
-		afu->spa_max_procs = spa_max_procs(afu->spa_size);
-	} while (afu->spa_max_procs < afu->num_procs);
+		afu->native->spa_order++;
+		afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
+	} while (afu->native->spa_max_procs < afu->num_procs);
 
-	WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */
+	WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */
 
-	if (!(afu->spa = (struct cxl_process_element *)
-	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
+	if (!(afu->native->spa = (struct cxl_process_element *)
+	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
 		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
 		return -ENOMEM;
 	}
 	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
-		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
+		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
 
 	return 0;
 }
@@ -210,13 +210,15 @@ static void attach_spa(struct cxl_afu *afu)
 {
 	u64 spap;
 
-	afu->sw_command_status = (__be64 *)((char *)afu->spa +
-					    ((afu->spa_max_procs + 3) * 128));
+	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
+						    ((afu->native->spa_max_procs + 3) * 128));
 
-	spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
-	spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
+	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
+	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
 	spap |= CXL_PSL_SPAP_V;
-	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
+	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
+		afu->native->spa, afu->native->spa_max_procs,
+		afu->native->sw_command_status, spap);
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
 }
 
@@ -227,9 +229,10 @@ static inline void detach_spa(struct cxl_afu *afu)
 
 void cxl_release_spa(struct cxl_afu *afu)
 {
-	if (afu->spa) {
-		free_pages((unsigned long) afu->spa, afu->spa_order);
-		afu->spa = NULL;
+	if (afu->native->spa) {
+		free_pages((unsigned long) afu->native->spa,
+			   afu->native->spa_order);
+		afu->native->spa = NULL;
 	}
 }
 
@@ -291,7 +294,7 @@ static void slb_invalid(struct cxl_context *ctx)
 	struct cxl *adapter = ctx->afu->adapter;
 	u64 slbia;
 
-	WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));
+	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
 
 	cxl_p1_write(adapter, CXL_PSL_LBISEL,
 		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
@@ -321,7 +324,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
 
 	ctx->elem->software_state = cpu_to_be32(pe_state);
 	smp_wmb();
-	*(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
+	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
 	smp_mb();
 	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
 	while (1) {
@@ -335,7 +338,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
 			rc = -EIO;
 			goto out;
 		}
-		state = be64_to_cpup(ctx->afu->sw_command_status);
+		state = be64_to_cpup(ctx->afu->native->sw_command_status);
 		if (state == ~0ULL) {
 			pr_err("cxl: Error adding process element to AFU\n");
 			rc = -1;
@@ -363,12 +366,12 @@ static int add_process_element(struct cxl_context *ctx)
 {
 	int rc = 0;
 
-	mutex_lock(&ctx->afu->spa_mutex);
+	mutex_lock(&ctx->afu->native->spa_mutex);
 	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
 	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
 		ctx->pe_inserted = true;
 	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
-	mutex_unlock(&ctx->afu->spa_mutex);
+	mutex_unlock(&ctx->afu->native->spa_mutex);
 	return rc;
 }
 
@@ -380,7 +383,7 @@ static int terminate_process_element(struct cxl_context *ctx)
 	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
 		return rc;
 
-	mutex_lock(&ctx->afu->spa_mutex);
+	mutex_lock(&ctx->afu->native->spa_mutex);
 	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
 	/* We could be asked to terminate when the hw is down. That
 	 * should always succeed: it's not running if the hw has gone
@@ -391,7 +394,7 @@ static int terminate_process_element(struct cxl_context *ctx)
 			    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
 	ctx->elem->software_state = 0;	/* Remove Valid bit */
 	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
-	mutex_unlock(&ctx->afu->spa_mutex);
+	mutex_unlock(&ctx->afu->native->spa_mutex);
 	return rc;
 }
 
@@ -399,7 +402,7 @@ static int remove_process_element(struct cxl_context *ctx)
 {
 	int rc = 0;
 
-	mutex_lock(&ctx->afu->spa_mutex);
+	mutex_lock(&ctx->afu->native->spa_mutex);
 	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
 
 	/* We could be asked to remove when the hw is down. Again, if
@@ -412,7 +415,7 @@ static int remove_process_element(struct cxl_context *ctx)
 		ctx->pe_inserted = false;
 	slb_invalid(ctx);
 	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
-	mutex_unlock(&ctx->afu->spa_mutex);
+	mutex_unlock(&ctx->afu->native->spa_mutex);
 
 	return rc;
 }
@@ -425,7 +428,7 @@ void cxl_assign_psn_space(struct cxl_context *ctx)
 		ctx->psn_size = ctx->afu->adapter->ps_size;
 	} else {
 		ctx->psn_phys = ctx->afu->psn_phys +
-			(ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
+			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
 		ctx->psn_size = ctx->afu->pp_size;
 	}
 }
@@ -437,7 +440,7 @@ static int activate_afu_directed(struct cxl_afu *afu)
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
 	afu->num_procs = afu->max_procs_virtualised;
-	if (afu->spa == NULL) {
+	if (afu->native->spa == NULL) {
 		if (cxl_alloc_spa(afu))
 			return -ENOMEM;
 	}
@@ -846,27 +849,27 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 		return -ENOMEM;
 
 	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
-				       &adapter->err_hwirq,
-				       &adapter->err_virq,
+				       &adapter->native->err_hwirq,
+				       &adapter->native->err_virq,
 				       adapter->irq_name))) {
 		kfree(adapter->irq_name);
 		adapter->irq_name = NULL;
 		return rc;
 	}
 
-	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
+	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
 
 	return 0;
 }
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-	if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
+	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
 		return;
 
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
-	cxl_unmap_irq(adapter->err_virq, adapter);
-	cxl_ops->release_one_irq(adapter, adapter->err_hwirq);
+	cxl_unmap_irq(adapter->native->err_virq, adapter);
+	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
 	kfree(adapter->irq_name);
 }
 
@@ -915,8 +918,8 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 	if (!afu->psl_irq_name)
 		return -ENOMEM;
 
-	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, afu,
-				       &afu->psl_hwirq, &afu->psl_virq,
+	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
+				afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
 				       afu->psl_irq_name))) {
 		kfree(afu->psl_irq_name);
 		afu->psl_irq_name = NULL;
@@ -926,11 +929,11 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
+	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
 		return;
 
-	cxl_unmap_irq(afu->psl_virq, afu);
-	cxl_ops->release_one_irq(afu->adapter, afu->psl_hwirq);
+	cxl_unmap_irq(afu->native->psl_virq, afu);
+	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
 	kfree(afu->psl_irq_name);
 }
 
@@ -970,7 +973,7 @@ static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
 		return -EIO;
 	if (unlikely(off >= afu->crs_len))
 		return -ERANGE;
-	*out = in_le64(afu->afu_desc_mmio + afu->crs_offset +
+	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
 		       (cr * afu->crs_len) + off);
 	return 0;
 }
@@ -981,7 +984,7 @@ static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
 		return -EIO;
 	if (unlikely(off >= afu->crs_len))
 		return -ERANGE;
-	*out = in_le32(afu->afu_desc_mmio + afu->crs_offset +
+	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
 		       (cr * afu->crs_len) + off);
 	return 0;
 }
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 23b84c5d9453..fb4fd45e8744 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -90,8 +90,8 @@
 
 /* This works a little different than the p1/p2 register accesses to make it
  * easier to pull out individual fields */
-#define AFUD_READ(afu, off)		in_be64(afu->afu_desc_mmio + off)
-#define AFUD_READ_LE(afu, off)		in_le64(afu->afu_desc_mmio + off)
+#define AFUD_READ(afu, off)		in_be64(afu->native->afu_desc_mmio + off)
+#define AFUD_READ_LE(afu, off)		in_le64(afu->native->afu_desc_mmio + off)
 #define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
 #define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
 
@@ -550,15 +550,15 @@ static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
 
 	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
 	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
-	afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
-	afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);
+	afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
+	afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
 
-	if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
+	if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
 		goto err;
 	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
 		goto err1;
 	if (afu_desc) {
-		if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
+		if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
 			goto err2;
 	}
 
@@ -566,7 +566,7 @@ static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
 err2:
 	iounmap(afu->p2n_mmio);
 err1:
-	iounmap(afu->p1n_mmio);
+	iounmap(afu->native->p1n_mmio);
 err:
 	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
 	return -ENOMEM;
@@ -578,13 +578,13 @@ static void pci_unmap_slice_regs(struct cxl_afu *afu)
 		iounmap(afu->p2n_mmio);
 		afu->p2n_mmio = NULL;
 	}
-	if (afu->p1n_mmio) {
-		iounmap(afu->p1n_mmio);
-		afu->p1n_mmio = NULL;
+	if (afu->native->p1n_mmio) {
+		iounmap(afu->native->p1n_mmio);
+		afu->native->p1n_mmio = NULL;
 	}
-	if (afu->afu_desc_mmio) {
-		iounmap(afu->afu_desc_mmio);
-		afu->afu_desc_mmio = NULL;
+	if (afu->native->afu_desc_mmio) {
+		iounmap(afu->native->afu_desc_mmio);
+		afu->native->afu_desc_mmio = NULL;
 	}
 }
 
@@ -597,6 +597,7 @@ void cxl_pci_release_afu(struct device *dev)
 	idr_destroy(&afu->contexts_idr);
 	cxl_release_spa(afu);
 
+	kfree(afu->native);
 	kfree(afu);
 }
 
@@ -621,7 +622,7 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
 	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
 	afu->psa = AFUD_PPPSA_PSA(val);
 	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
-		afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);
+		afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);
 
 	val = AFUD_READ_CR(afu);
 	afu->crs_len = AFUD_CR_LEN(val) * 256;
@@ -652,7 +653,7 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
 	u32 val;
 
 	if (afu->psa && afu->adapter->ps_size <
-	    (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
+	    (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
 		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
 		return -ENODEV;
 	}
@@ -737,7 +738,7 @@ ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
 	loff_t aligned_start, aligned_end;
 	size_t aligned_length;
 	void *tbuf;
-	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;
+	const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;
 
 	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
 		return 0;
@@ -819,19 +820,25 @@ static void pci_deconfigure_afu(struct cxl_afu *afu)
 static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
 {
 	struct cxl_afu *afu;
-	int rc;
+	int rc = -ENOMEM;
 
 	afu = cxl_alloc_afu(adapter, slice);
 	if (!afu)
 		return -ENOMEM;
 
+	afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
+	if (!afu->native)
+		goto err_free_afu;
+
+	mutex_init(&afu->native->spa_mutex);
+
 	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
 	if (rc)
-		goto err_free;
+		goto err_free_native;
 
 	rc = pci_configure_afu(afu, adapter, dev);
 	if (rc)
-		goto err_free;
+		goto err_free_native;
 
 	/* Don't care if this fails */
 	cxl_debugfs_afu_add(afu);
@@ -859,7 +866,9 @@ err_put1:
 	device_unregister(&afu->dev);
 	return rc;
 
-err_free:
+err_free_native:
+	kfree(afu->native);
+err_free_afu:
 	kfree(afu);
 	return rc;
 
@@ -920,17 +929,17 @@ static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
 	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
 			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
 
-	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
+	if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
 		goto err3;
 
-	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
+	if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
 		goto err4;
 
 	return 0;
 
 err4:
-	iounmap(adapter->p1_mmio);
-	adapter->p1_mmio = NULL;
+	iounmap(adapter->native->p1_mmio);
+	adapter->native->p1_mmio = NULL;
 err3:
 	pci_release_region(dev, 0);
 err2:
@@ -941,14 +950,14 @@ err1:
 
 static void cxl_unmap_adapter_regs(struct cxl *adapter)
 {
-	if (adapter->p1_mmio) {
-		iounmap(adapter->p1_mmio);
-		adapter->p1_mmio = NULL;
+	if (adapter->native->p1_mmio) {
+		iounmap(adapter->native->p1_mmio);
+		adapter->native->p1_mmio = NULL;
 		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
 	}
-	if (adapter->p2_mmio) {
-		iounmap(adapter->p2_mmio);
-		adapter->p2_mmio = NULL;
+	if (adapter->native->p2_mmio) {
+		iounmap(adapter->native->p2_mmio);
+		adapter->native->p2_mmio = NULL;
 		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
 	}
 }
@@ -989,10 +998,10 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
 
 	/* Convert everything to bytes, because there is NO WAY I'd look at the
 	 * code a month later and forget what units these are in ;-) */
-	adapter->ps_off = ps_off * 64 * 1024;
+	adapter->native->ps_off = ps_off * 64 * 1024;
 	adapter->ps_size = ps_size * 64 * 1024;
-	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
-	adapter->afu_desc_size = afu_desc_size *64 * 1024;
+	adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
+	adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
 
 	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
 	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
@@ -1043,15 +1052,15 @@ static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
 		return -EINVAL;
 	}
 
-	if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
+	if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
 		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
 		return -EINVAL;
 	}
 
-	if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
+	if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
 		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
 			"available in BAR2: 0x%llx > 0x%llx\n",
-			adapter->ps_size, p2_size(dev) - adapter->ps_off);
+			adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
 		return -EINVAL;
 	}
 
@@ -1066,6 +1075,7 @@ static void cxl_release_adapter(struct device *dev)
 
 	cxl_remove_adapter_nr(adapter);
 
+	kfree(adapter->native);
 	kfree(adapter);
 }
 
@@ -1162,6 +1172,12 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
 	if (!adapter)
 		return ERR_PTR(-ENOMEM);
 
+	adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
+	if (!adapter->native) {
+		rc = -ENOMEM;
+		goto err_release;
+	}
+
 	/* Set defaults for parameters which need to persist over
 	 * configure/reconfigure
 	 */
@@ -1171,8 +1187,7 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
 	rc = cxl_configure_adapter(adapter, dev);
 	if (rc) {
 		pci_disable_device(dev);
-		cxl_release_adapter(&adapter->dev);
-		return ERR_PTR(rc);
+		goto err_release;
 	}
 
 	/* Don't care if this one fails: */
@@ -1198,6 +1213,10 @@ err_put1:
 	cxl_deconfigure_adapter(adapter);
 	device_unregister(&adapter->dev);
 	return ERR_PTR(rc);
+
+err_release:
+	cxl_release_adapter(&adapter->dev);
+	return ERR_PTR(rc);
 }
 
 static void cxl_pci_remove_adapter(struct cxl *adapter)
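The pci.c changes above pair each new kzalloc() with a kfree() in the release
callback and reroute early-exit paths through a single err_release label. A
condensed sketch of that unwind pattern, illustrative only
(init_adapter_sketch is hypothetical; the real code goes through
cxl_alloc_adapter() and cxl_release_adapter()):

#include <linux/err.h>
#include <linux/slab.h>

struct cxl_native { u64 ps_off; /* ... */ };
struct cxl { struct cxl_native *native; /* ... */ };

/* Hypothetical condensed version of cxl_pci_init_adapter()'s unwinding. */
static struct cxl *init_adapter_sketch(void)
{
	struct cxl *adapter;
	int rc;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	adapter->native = kzalloc(sizeof(*adapter->native), GFP_KERNEL);
	if (!adapter->native) {
		rc = -ENOMEM;
		goto err_release;
	}

	return adapter;

err_release:
	kfree(adapter->native);	/* kfree(NULL) is a no-op */
	kfree(adapter);
	return ERR_PTR(rc);
}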
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 300eafe4ed43..1a1409cb1652 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -165,7 +165,7 @@ static ssize_t pp_mmio_off_show(struct device *device,
 {
 	struct cxl_afu *afu = to_afu_chardev_m(device);
 
-	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
 }
 
 static ssize_t pp_mmio_len_show(struct device *device,
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index e8a8eed6f006..baa408748300 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -248,7 +248,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
 
 	/* Setup the PHB using arch provided callback */
 	phb->ops = &cxl_pcie_pci_ops;
-	phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
+	phb->cfg_addr = afu->native->afu_desc_mmio + afu->crs_offset;
 	phb->cfg_data = (void *)(u64)afu->crs_len;
 	phb->private_data = afu;
 	phb->controller_ops = cxl_pci_controller_ops;
@@ -278,7 +278,7 @@ void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
 	 * and remapped. We need to reflect this in the PHB's view of
 	 * the world.
 	 */
-	afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
+	afu->phb->cfg_addr = afu->native->afu_desc_mmio + afu->crs_offset;
 }
 
 void cxl_pci_vphb_remove(struct cxl_afu *afu)