aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorJeremy Kerr <jk@ozlabs.org>2007-12-04 21:49:31 -0500
committerArnd Bergmann <arnd@arndb.de>2007-12-18 19:00:05 -0500
commit684bd614015188561197342fd336292e9e2ce196 (patch)
tree89307cd386307b6bdfa9c65165a8d0fc95eb77d5 /arch
parentf6eb7d7ffef3e2fa40b0161c30486cb87203758d (diff)
[POWERPC] cell: handle SPE kernel mappings that cross segment boundaries
Currently, we have a possibility that the SLBs setup during context switch don't cover the entirety of the necessary lscsa and code regions, if these regions cross a segment boundary. This change checks the start and end of each region, and inserts an SLB entry for each, if unique. We also remove the assumption that the spu_save_code and spu_restore_code reside in the same segment, by using the specific code array for save and restore. Signed-off-by: Jeremy Kerr <jk@ozlabs.org> Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c50
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c11
2 files changed, 50 insertions, 11 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 95001cdfaa26..ee37e0e39b6b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -275,19 +275,55 @@ static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
275} 275}
276 276
277/** 277/**
278 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
279 * address @new_addr is present.
280 */
281static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
282 void *new_addr)
283{
284 unsigned long ea = (unsigned long)new_addr;
285 int i;
286
287 for (i = 0; i < nr_slbs; i++)
288 if (!((slbs[i].esid ^ ea) & ESID_MASK))
289 return 1;
290
291 return 0;
292}
293
294/**
278 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We 295 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
279 * need to map both the context save area, and the save/restore code. 296 * need to map both the context save area, and the save/restore code.
297 *
298 * Because the lscsa and code may cross segment boundaries, we check to see
299 * if mappings are required for the start and end of each range. We currently
300 * assume that the mappings are smaller than one segment - if not, something
301 * is seriously wrong.
280 */ 302 */
281void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, void *code) 303void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
304 void *code, int code_size)
282{ 305{
283 struct spu_slb code_slb, lscsa_slb; 306 struct spu_slb slbs[4];
307 int i, nr_slbs = 0;
308 /* start and end addresses of both mappings */
309 void *addrs[] = {
310 lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
311 code, code + code_size - 1
312 };
313
314 /* check the set of addresses, and create a new entry in the slbs array
315 * if there isn't already a SLB for that address */
316 for (i = 0; i < ARRAY_SIZE(addrs); i++) {
317 if (__slb_present(slbs, nr_slbs, addrs[i]))
318 continue;
284 319
285 __spu_kernel_slb(lscsa, &lscsa_slb); 320 __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
286 __spu_kernel_slb(code, &code_slb); 321 nr_slbs++;
322 }
287 323
288 spu_load_slb(spu, 0, &lscsa_slb); 324 /* Add the set of SLBs */
289 if (lscsa_slb.esid != code_slb.esid) 325 for (i = 0; i < nr_slbs; i++)
290 spu_load_slb(spu, 1, &code_slb); 326 spu_load_slb(spu, i, &slbs[i]);
291} 327}
292EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs); 328EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
293 329
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 96f55148a408..8cbc6574820f 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -691,7 +691,8 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
691 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE); 691 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
692} 692}
693 693
694static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu) 694static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
695 unsigned int *code, int code_size)
695{ 696{
696 /* Save, Step 47: 697 /* Save, Step 47:
697 * Restore, Step 30. 698 * Restore, Step 30.
@@ -708,7 +709,7 @@ static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
708 * translation is desired by OS environment). 709 * translation is desired by OS environment).
709 */ 710 */
710 spu_invalidate_slbs(spu); 711 spu_invalidate_slbs(spu);
711 spu_setup_kernel_slbs(spu, csa->lscsa, &spu_save_code); 712 spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
712} 713}
713 714
714static inline void set_switch_active(struct spu_state *csa, struct spu *spu) 715static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
@@ -1835,7 +1836,8 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
1835 */ 1836 */
1836 1837
1837 resume_mfc_queue(prev, spu); /* Step 46. */ 1838 resume_mfc_queue(prev, spu); /* Step 46. */
1838 setup_mfc_slbs(prev, spu); /* Step 47. */ 1839 /* Step 47. */
1840 setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
1839 set_switch_active(prev, spu); /* Step 48. */ 1841 set_switch_active(prev, spu); /* Step 48. */
1840 enable_interrupts(prev, spu); /* Step 49. */ 1842 enable_interrupts(prev, spu); /* Step 49. */
1841 save_ls_16kb(prev, spu); /* Step 50. */ 1843 save_ls_16kb(prev, spu); /* Step 50. */
@@ -1940,7 +1942,8 @@ static void restore_lscsa(struct spu_state *next, struct spu *spu)
1940 setup_spu_status_part1(next, spu); /* Step 27. */ 1942 setup_spu_status_part1(next, spu); /* Step 27. */
1941 setup_spu_status_part2(next, spu); /* Step 28. */ 1943 setup_spu_status_part2(next, spu); /* Step 28. */
1942 restore_mfc_rag(next, spu); /* Step 29. */ 1944 restore_mfc_rag(next, spu); /* Step 29. */
1943 setup_mfc_slbs(next, spu); /* Step 30. */ 1945 /* Step 30. */
1946 setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
1944 set_spu_npc(next, spu); /* Step 31. */ 1947 set_spu_npc(next, spu); /* Step 31. */
1945 set_signot1(next, spu); /* Step 32. */ 1948 set_signot1(next, spu); /* Step 32. */
1946 set_signot2(next, spu); /* Step 33. */ 1949 set_signot2(next, spu); /* Step 33. */