| author | Andre Detsch <adetsch@br.ibm.com> | 2008-02-19 08:06:15 -0500 |
|---|---|---|
| committer | Jeremy Kerr <jk@ozlabs.org> | 2008-02-19 22:57:36 -0500 |
| commit | 61b36fc1f7d511132b1dd1422c29c7a8f26d77db (patch) | |
| tree | c681c9de46a88a5c99af21c881d1997cac2fee09 /arch | |
| parent | 4ef110141b3e0758fe30d686417b5686b87eb25b (diff) | |
[POWERPC] cell: fix spurious false return from spu_trap_data_{map,seg}
At present, the __spu_trap_data_map and __spu_trap_data_seg functions
exit early if spu->flags has the SPU_CONTEXT_SWITCH_ACTIVE bit set. This was
resulting in spurious returns from these functions, as they may be
legitimately called while this bit is set.
We only use it in these two sanity checks, so this change removes the
flag completely. This fixes hangs in the page-fault path of SPE apps.
Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
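For readers skimming the patch, here is a minimal user-space sketch (not kernel code) of the pattern the commit removes. `test_bit` is reimplemented as a stand-in for the kernel helper, the bit index is illustrative rather than the real enum value, and the fault-handling flow that consumes the return value is omitted; it only shows how a legitimate fault raised while the switch-active bit is set gets rejected with a nonzero return, which is the spurious behaviour described above.

```c
#include <stdio.h>

/* Illustrative bit index; the real value comes from the kernel's spu.h. */
#define SPU_CONTEXT_SWITCH_ACTIVE 1UL

struct spu {
	unsigned long flags;
};

/* Stand-in for the kernel's test_bit() helper. */
static int test_bit(unsigned long nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

/* Before the patch: bail out whenever the switch-active bit is set. */
static int trap_data_map_old(struct spu *spu)
{
	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printf("%s: invalid access during switch!\n", __func__);
		return 1;	/* fault is dropped even if it was legitimate */
	}
	return 0;		/* fault is handed on for servicing */
}

int main(void)
{
	/* A fault arrives while the context switch is marked active. */
	struct spu spu = { .flags = 1UL << SPU_CONTEXT_SWITCH_ACTIVE };

	printf("old handler returns %d\n", trap_data_map_old(&spu));
	return 0;
}
```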
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/platforms/cell/spu_base.c | 12
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/switch.c | 6
2 files changed, 3 insertions, 15 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e45cfa84911f..87eb07f94c5f 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -160,13 +160,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 
 	pr_debug("%s\n", __FUNCTION__);
 
-	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
-		/* SLBs are pre-loaded for context switch, so
-		 * we should never get here!
-		 */
-		printk("%s: invalid access during switch!\n", __func__);
-		return 1;
-	}
 	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
 
 	switch(REGION_ID(ea)) {
@@ -226,11 +219,6 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 		return 0;
 	}
 
-	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
-		printk("%s: invalid access during switch!\n", __func__);
-		return 1;
-	}
-
 	spu->class_0_pending = 0;
 	spu->dar = ea;
 	spu->dsisr = dsisr;
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6063c88c26d2..6f5886c7b1f9 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -720,8 +720,9 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
 	 * Restore, Step 23.
 	 * Change the software context switch pending flag
 	 * to context switch active.
+	 *
+	 * This implementation does not uses a switch active flag.
 	 */
-	set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
 	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
 	mb();
 }
@@ -1739,9 +1740,8 @@ static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
 {
 	/* Restore, Step 74:
 	 * Reset the "context switch active" flag.
+	 * Not performed by this implementation.
 	 */
-	clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
-	mb();
 }
 
 static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)