diff options
author:    Jeremy Kerr <jk@ozlabs.org>        2007-12-04 21:49:31 -0500
committer: Arnd Bergmann <arnd@arndb.de>      2007-12-18 19:00:04 -0500
commit:    58bd403c3c79dd41acf5af2d170bd4e0872bb326 (patch)
tree:      64e30777f28b9378d690e224aefbaac27be79d7b /arch
parent:    a0a7ae8939e3fdecf5478ddba54562e23de7ca1d (diff)
[POWERPC] cell: handle kernel SLB setup in spu_base.c
Currently, the SPU context switch code (spufs/switch.c) sets up the
SPU's SLBs directly, which requires some low-level mm stuff.
This change moves the kernel SLB setup to spu_base.c, by exposing
a function spu_setup_kernel_slbs() to do this setup. This allows us
to remove the low-level mm code from switch.c, making it possible
to later move switch.c to the spufs module.
Also, add a struct spu_slb for the cases where we need to deal with
SLB entries.
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch')
 arch/powerpc/platforms/cell/spu_base.c     | 49 +++++++++++++++++++++++++
 arch/powerpc/platforms/cell/spufs/switch.c | 33 ++----------------
 2 files changed, 50 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 525712055bb2..2ec1d38829dd 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -34,6 +34,7 @@
 #include <linux/linux_logo.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
+#include <asm/spu_csa.h>
 #include <asm/xmon.h>
 #include <asm/prom.h>
 
@@ -73,6 +74,10 @@ static LIST_HEAD(spu_full_list);
 static DEFINE_SPINLOCK(spu_full_list_lock);
 static DEFINE_MUTEX(spu_full_list_mutex);
 
+struct spu_slb {
+	u64 esid, vsid;
+};
+
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -150,6 +155,18 @@ static void spu_restart_dma(struct spu *spu)
 	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
 }
 
+static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
+{
+	struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+	pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
+			__func__, slbe, slb->vsid, slb->esid);
+
+	out_be64(&priv2->slb_index_W, slbe);
+	out_be64(&priv2->slb_vsid_RW, slb->vsid);
+	out_be64(&priv2->slb_esid_RW, slb->esid);
+}
+
 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -239,6 +256,38 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	return 0;
 }
 
+static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
+{
+	unsigned long ea = (unsigned long)addr;
+	u64 llp;
+
+	if (REGION_ID(ea) == KERNEL_REGION_ID)
+		llp = mmu_psize_defs[mmu_linear_psize].sllp;
+	else
+		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+
+	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+		SLB_VSID_KERNEL | llp;
+	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
+}
+
+/**
+ * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
+ * need to map both the context save area, and the save/restore code.
+ */
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, void *code)
+{
+	struct spu_slb code_slb, lscsa_slb;
+
+	__spu_kernel_slb(lscsa, &lscsa_slb);
+	__spu_kernel_slb(code, &code_slb);
+
+	spu_load_slb(spu, 0, &lscsa_slb);
+	if (lscsa_slb.esid != code_slb.esid)
+		spu_load_slb(spu, 1, &code_slb);
+}
+EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
+
 static irqreturn_t
 spu_irq_class_0(int irq, void *data)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 3d64c81cc6e2..96f55148a408 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -691,35 +691,8 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
 	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
 }
 
-static inline void get_kernel_slb(u64 ea, u64 slb[2])
-{
-	u64 llp;
-
-	if (REGION_ID(ea) == KERNEL_REGION_ID)
-		llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	else
-		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
-	slb[0] = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-		SLB_VSID_KERNEL | llp;
-	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
-}
-
-static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
-{
-	struct spu_priv2 __iomem *priv2 = spu->priv2;
-
-	out_be64(&priv2->slb_index_W, slbe);
-	eieio();
-	out_be64(&priv2->slb_vsid_RW, slb[0]);
-	out_be64(&priv2->slb_esid_RW, slb[1]);
-	eieio();
-}
-
 static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
 {
-	u64 code_slb[2];
-	u64 lscsa_slb[2];
-
 	/* Save, Step 47:
 	 * Restore, Step 30.
 	 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
@@ -735,11 +708,7 @@ static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
 	 * translation is desired by OS environment).
 	 */
 	spu_invalidate_slbs(spu);
-	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
-	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
-	load_mfc_slb(spu, code_slb, 0);
-	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
-		load_mfc_slb(spu, lscsa_slb, 1);
+	spu_setup_kernel_slbs(spu, csa->lscsa, &spu_save_code);
 }
 
 static inline void set_switch_active(struct spu_state *csa, struct spu *spu)