path: root/drivers/misc/cxl/native.c
author    Daniel Axtens <dja@axtens.net>  2015-08-14 03:41:19 -0400
committer Michael Ellerman <mpe@ellerman.id.au>  2015-08-14 07:32:04 -0400
commit    05155772f642a5b0e421adf2017f993759bf7f95 (patch)
tree      d8acd2b562297cedb54001af3c40b22ee11b0547 /drivers/misc/cxl/native.c
parent    0b3f9c757cabad4b8101c5fcddddd029ed5506a6 (diff)
cxl: Allocate and release the SPA with the AFU
Previously the SPA was allocated and freed upon entering and leaving
AFU-directed mode. This causes some issues for error recovery - contexts
hold a pointer inside the SPA, and they may persist after the AFU has
been detached.

We would ideally like to allocate the SPA when the AFU is allocated, and
release it when the AFU is released. However, we don't know how big the
SPA needs to be until we read the AFU descriptor.

Therefore, restructure the code:

 - Allocate the SPA only once, on the first attach.

 - Release the SPA only when the entire AFU is being released (not
   detached). Guard the release with a NULL check, so we don't free it
   if it was never allocated (e.g. in dedicated mode).

Acked-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
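As a rough illustration of the lifecycle this patch establishes, here is a
minimal userspace model - not kernel code: calloc/free stand in for the page
allocator, the CXL_PSL_SPAP_An MMIO writes are stubbed with printf, and the
names simply mirror the patch below.

/*
 * Userspace sketch of the restructured SPA lifecycle: allocated once on
 * first attach, re-pointed on every attach, freed only with the AFU.
 */
#include <stdio.h>
#include <stdlib.h>

struct cxl_afu {
	void *spa;	/* scheduled process area; NULL until first attach */
};

static int cxl_alloc_spa(struct cxl_afu *afu)
{
	afu->spa = calloc(1, 4096);	/* stand-in for the real page allocation */
	return afu->spa ? 0 : -1;
}

static void attach_spa(struct cxl_afu *afu)
{
	printf("CXL_PSL_SPAP_An <- %p (valid)\n", afu->spa);	/* stubbed MMIO */
}

static void detach_spa(struct cxl_afu *afu)
{
	printf("CXL_PSL_SPAP_An <- 0\n");			/* stubbed MMIO */
}

static void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->spa) {		/* NULL check: never allocated in dedicated mode */
		free(afu->spa);
		afu->spa = NULL;
	}
}

int main(void)
{
	struct cxl_afu afu = { 0 };
	int i;

	/* Enter and leave AFU-directed mode twice: the SPA is allocated
	 * exactly once, so contexts holding pointers into it stay valid
	 * across a detach. */
	for (i = 0; i < 2; i++) {
		if (afu.spa == NULL && cxl_alloc_spa(&afu))
			return 1;
		attach_spa(&afu);
		detach_spa(&afu);	/* deactivation no longer frees the SPA */
	}

	cxl_release_spa(&afu);	/* freed once, together with the AFU */
	return 0;
}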
Diffstat (limited to 'drivers/misc/cxl/native.c')
 drivers/misc/cxl/native.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 44568dd68bb9..b37f2e8004f5 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -183,10 +183,8 @@ static int spa_max_procs(int spa_size)
 	return ((spa_size / 8) - 96) / 17;
 }
 
-static int alloc_spa(struct cxl_afu *afu)
+int cxl_alloc_spa(struct cxl_afu *afu)
 {
-	u64 spap;
-
 	/* Work out how many pages to allocate */
 	afu->spa_order = 0;
 	do {
@@ -205,6 +203,13 @@ static int alloc_spa(struct cxl_afu *afu)
 	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
 		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
 
+	return 0;
+}
+
+static void attach_spa(struct cxl_afu *afu)
+{
+	u64 spap;
+
 	afu->sw_command_status = (__be64 *)((char *)afu->spa +
 					    ((afu->spa_max_procs + 3) * 128));
 
@@ -213,14 +218,19 @@ static int alloc_spa(struct cxl_afu *afu)
 	spap |= CXL_PSL_SPAP_V;
 	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-
-	return 0;
 }
 
-static void release_spa(struct cxl_afu *afu)
+static inline void detach_spa(struct cxl_afu *afu)
 {
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
-	free_pages((unsigned long) afu->spa, afu->spa_order);
+}
+
+void cxl_release_spa(struct cxl_afu *afu)
+{
+	if (afu->spa) {
+		free_pages((unsigned long) afu->spa, afu->spa_order);
+		afu->spa = NULL;
+	}
 }
 
 int cxl_tlb_slb_invalidate(struct cxl *adapter)
@@ -447,8 +457,11 @@ static int activate_afu_directed(struct cxl_afu *afu)
 
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
-	if (alloc_spa(afu))
-		return -ENOMEM;
+	if (afu->spa == NULL) {
+		if (cxl_alloc_spa(afu))
+			return -ENOMEM;
+	}
+	attach_spa(afu);
 
 	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
 	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
@@ -559,8 +572,6 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
 	cxl_afu_disable(afu);
 	cxl_psl_purge(afu);
 
-	release_spa(afu);
-
 	return 0;
 }
 
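As an aside, the SPA sizing math visible in the first two hunks can be checked
standalone. The sketch below assumes 4 KiB pages and a hypothetical num_procs;
the body of the do/while loop follows cxl_alloc_spa() in the kernel source,
since the hunks above cut it off after "do {".

/*
 * Standalone check of the sizing math from spa_max_procs()/cxl_alloc_spa().
 * PAGE_SIZE and num_procs are example values, not taken from the patch.
 */
#include <stdio.h>

#define PAGE_SIZE 4096		/* assumed 4 KiB pages for this example */

static int spa_max_procs(int spa_size)
{
	return ((spa_size / 8) - 96) / 17;	/* formula from the diff */
}

int main(void)
{
	int num_procs = 64;	/* hypothetical value from the AFU descriptor */
	int spa_order = 0, spa_size, max_procs;

	/* Double the allocation order until the SPA fits num_procs. */
	do {
		spa_order++;
		spa_size = (1 << spa_order) * PAGE_SIZE;
		max_procs = spa_max_procs(spa_size);
	} while (max_procs < num_procs);

	/* For num_procs=64: order 1 gives 8192 bytes -> 54 processes (too
	 * few); order 2 gives 16384 bytes -> 114 processes, which fits. */
	printf("spa_order=%d spa_size=%d max_procs=%d\n",
	       spa_order, spa_size, max_procs);
	return 0;
}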