Diffstat (limited to 'drivers/misc/cxl/native.c')
 drivers/misc/cxl/native.c | 85
 1 file changed, 44 insertions(+), 41 deletions(-)
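
Note: the change below is almost entirely mechanical. Bare-metal-only PSL state (the Scheduled Process Area bookkeeping, the PSL error and multiplexed interrupts, and the AFU descriptor MMIO mapping) moves out of the shared struct cxl_afu / struct cxl and behind a "native" pointer, so accesses become afu->native->X and adapter->native->X. A minimal sketch of the split implied by the accessor changes; the field grouping, types, and the cxl_afu_native name are inferred for orientation, not quoted from cxl.h:

/* Inferred sketch only; see drivers/misc/cxl/cxl.h in the same
 * series for the authoritative definitions. */
struct cxl_afu_native {
	void __iomem *afu_desc_mmio;		/* AFU descriptor registers */
	irq_hw_number_t psl_hwirq;		/* multiplexed PSL interrupt */
	unsigned int psl_virq;
	struct mutex spa_mutex;			/* guards the process element list */
	struct cxl_process_element *spa;	/* Scheduled Process Area */
	__be64 *sw_command_status;		/* SW command/status word inside the SPA */
	unsigned int spa_size;
	int spa_order;
	int spa_max_procs;
	u64 pp_offset;				/* per-process problem-state offset */
};

struct cxl_afu {
	/* ...fields shared with other back ends (num_procs, pp_size, ...)... */
	struct cxl_afu_native *native;		/* bare-metal-only state */
};

The adapter side gets the same treatment: adapter->err_hwirq and adapter->err_virq become adapter->native->err_hwirq and adapter->native->err_virq.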
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 7d526294112d..c0bca59a12aa 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -186,22 +186,22 @@ static int spa_max_procs(int spa_size)
 int cxl_alloc_spa(struct cxl_afu *afu)
 {
 	/* Work out how many pages to allocate */
-	afu->spa_order = 0;
+	afu->native->spa_order = 0;
 	do {
-		afu->spa_order++;
-		afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
-		afu->spa_max_procs = spa_max_procs(afu->spa_size);
-	} while (afu->spa_max_procs < afu->num_procs);
+		afu->native->spa_order++;
+		afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
+	} while (afu->native->spa_max_procs < afu->num_procs);
 
-	WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */
+	WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */
 
-	if (!(afu->spa = (struct cxl_process_element *)
-	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
+	if (!(afu->native->spa = (struct cxl_process_element *)
+	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
 		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
 		return -ENOMEM;
 	}
 	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
-		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
+		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
 
 	return 0;
 }
@@ -210,13 +210,15 @@ static void attach_spa(struct cxl_afu *afu)
 {
 	u64 spap;
 
-	afu->sw_command_status = (__be64 *)((char *)afu->spa +
-					    ((afu->spa_max_procs + 3) * 128));
+	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
+						    ((afu->native->spa_max_procs + 3) * 128));
 
-	spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
-	spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
+	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
+	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
 	spap |= CXL_PSL_SPAP_V;
-	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
+	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
+		 afu->native->spa, afu->native->spa_max_procs,
+		 afu->native->sw_command_status, spap);
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
 }
 
@@ -227,9 +229,10 @@ static inline void detach_spa(struct cxl_afu *afu)
 
 void cxl_release_spa(struct cxl_afu *afu)
 {
-	if (afu->spa) {
-		free_pages((unsigned long) afu->spa, afu->spa_order);
-		afu->spa = NULL;
+	if (afu->native->spa) {
+		free_pages((unsigned long) afu->native->spa,
+			   afu->native->spa_order);
+		afu->native->spa = NULL;
 	}
 }
 
@@ -291,7 +294,7 @@ static void slb_invalid(struct cxl_context *ctx)
 	struct cxl *adapter = ctx->afu->adapter;
 	u64 slbia;
 
-	WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));
+	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
 
 	cxl_p1_write(adapter, CXL_PSL_LBISEL,
 			((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
@@ -321,7 +324,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
 
 	ctx->elem->software_state = cpu_to_be32(pe_state);
 	smp_wmb();
-	*(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
+	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
 	smp_mb();
 	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
 	while (1) {
@@ -335,7 +338,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
 			rc = -EIO;
 			goto out;
 		}
-		state = be64_to_cpup(ctx->afu->sw_command_status);
+		state = be64_to_cpup(ctx->afu->native->sw_command_status);
 		if (state == ~0ULL) {
 			pr_err("cxl: Error adding process element to AFU\n");
 			rc = -1;
@@ -363,12 +366,12 @@ static int add_process_element(struct cxl_context *ctx)
 {
 	int rc = 0;
 
-	mutex_lock(&ctx->afu->spa_mutex);
+	mutex_lock(&ctx->afu->native->spa_mutex);
 	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
 	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
 		ctx->pe_inserted = true;
 	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
-	mutex_unlock(&ctx->afu->spa_mutex);
+	mutex_unlock(&ctx->afu->native->spa_mutex);
 	return rc;
 }
 
@@ -380,7 +383,7 @@ static int terminate_process_element(struct cxl_context *ctx)
 	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
 		return rc;
 
-	mutex_lock(&ctx->afu->spa_mutex);
+	mutex_lock(&ctx->afu->native->spa_mutex);
 	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
 	/* We could be asked to terminate when the hw is down. That
 	 * should always succeed: it's not running if the hw has gone
@@ -391,7 +394,7 @@ static int terminate_process_element(struct cxl_context *ctx)
 			CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
 	ctx->elem->software_state = 0;	/* Remove Valid bit */
 	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
-	mutex_unlock(&ctx->afu->spa_mutex);
+	mutex_unlock(&ctx->afu->native->spa_mutex);
 	return rc;
 }
 
@@ -399,7 +402,7 @@ static int remove_process_element(struct cxl_context *ctx)
 {
 	int rc = 0;
 
-	mutex_lock(&ctx->afu->spa_mutex);
+	mutex_lock(&ctx->afu->native->spa_mutex);
 	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
 
 	/* We could be asked to remove when the hw is down. Again, if
@@ -412,7 +415,7 @@ static int remove_process_element(struct cxl_context *ctx)
 		ctx->pe_inserted = false;
 	slb_invalid(ctx);
 	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
-	mutex_unlock(&ctx->afu->spa_mutex);
+	mutex_unlock(&ctx->afu->native->spa_mutex);
 
 	return rc;
 }
@@ -425,7 +428,7 @@ void cxl_assign_psn_space(struct cxl_context *ctx)
 		ctx->psn_size = ctx->afu->adapter->ps_size;
 	} else {
 		ctx->psn_phys = ctx->afu->psn_phys +
-			(ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
+			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
 		ctx->psn_size = ctx->afu->pp_size;
 	}
 }
@@ -437,7 +440,7 @@ static int activate_afu_directed(struct cxl_afu *afu)
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
 	afu->num_procs = afu->max_procs_virtualised;
-	if (afu->spa == NULL) {
+	if (afu->native->spa == NULL) {
 		if (cxl_alloc_spa(afu))
 			return -ENOMEM;
 	}
@@ -846,27 +849,27 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 		return -ENOMEM;
 
 	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
-				       &adapter->err_hwirq,
-				       &adapter->err_virq,
+				       &adapter->native->err_hwirq,
+				       &adapter->native->err_virq,
 				       adapter->irq_name))) {
 		kfree(adapter->irq_name);
 		adapter->irq_name = NULL;
 		return rc;
 	}
 
-	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
+	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
 
 	return 0;
 }
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-	if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
+	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
 		return;
 
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
-	cxl_unmap_irq(adapter->err_virq, adapter);
-	cxl_ops->release_one_irq(adapter, adapter->err_hwirq);
+	cxl_unmap_irq(adapter->native->err_virq, adapter);
+	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
 	kfree(adapter->irq_name);
 }
 
@@ -915,8 +918,8 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 	if (!afu->psl_irq_name)
 		return -ENOMEM;
 
-	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, afu,
-				       &afu->psl_hwirq, &afu->psl_virq,
+	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
+				       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
 				       afu->psl_irq_name))) {
 		kfree(afu->psl_irq_name);
 		afu->psl_irq_name = NULL;
@@ -926,11 +929,11 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
+	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
 		return;
 
-	cxl_unmap_irq(afu->psl_virq, afu);
-	cxl_ops->release_one_irq(afu->adapter, afu->psl_hwirq);
+	cxl_unmap_irq(afu->native->psl_virq, afu);
+	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
 	kfree(afu->psl_irq_name);
 }
 
@@ -970,7 +973,7 @@ static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
 		return -EIO;
 	if (unlikely(off >= afu->crs_len))
 		return -ERANGE;
-	*out = in_le64(afu->afu_desc_mmio + afu->crs_offset +
+	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
 		       (cr * afu->crs_len) + off);
 	return 0;
 }
@@ -981,7 +984,7 @@ static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
 		return -EIO;
 	if (unlikely(off >= afu->crs_len))
 		return -ERANGE;
-	*out = in_le32(afu->afu_desc_mmio + afu->crs_offset +
+	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
 		       (cr * afu->crs_len) + off);
 	return 0;
 }