aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms/cell/spufs/switch.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/switch.c')
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c129
1 files changed, 47 insertions, 82 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index de1ad146fc6..1061c12b2ed 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -108,8 +108,6 @@ static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
108 108
109static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) 109static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
110{ 110{
111 struct spu_priv1 __iomem *priv1 = spu->priv1;
112
113 /* Save, Step 3: 111 /* Save, Step 3:
114 * Restore, Step 2: 112 * Restore, Step 2:
115 * Save INT_Mask_class0 in CSA. 113 * Save INT_Mask_class0 in CSA.
@@ -121,16 +119,13 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
121 */ 119 */
122 spin_lock_irq(&spu->register_lock); 120 spin_lock_irq(&spu->register_lock);
123 if (csa) { 121 if (csa) {
124 csa->priv1.int_mask_class0_RW = 122 csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
125 in_be64(&priv1->int_mask_class0_RW); 123 csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
126 csa->priv1.int_mask_class1_RW = 124 csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
127 in_be64(&priv1->int_mask_class1_RW);
128 csa->priv1.int_mask_class2_RW =
129 in_be64(&priv1->int_mask_class2_RW);
130 } 125 }
131 out_be64(&priv1->int_mask_class0_RW, 0UL); 126 spu_int_mask_set(spu, 0, 0ul);
132 out_be64(&priv1->int_mask_class1_RW, 0UL); 127 spu_int_mask_set(spu, 1, 0ul);
133 out_be64(&priv1->int_mask_class2_RW, 0UL); 128 spu_int_mask_set(spu, 2, 0ul);
134 eieio(); 129 eieio();
135 spin_unlock_irq(&spu->register_lock); 130 spin_unlock_irq(&spu->register_lock);
136} 131}
@@ -195,12 +190,10 @@ static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
195 190
196static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu) 191static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
197{ 192{
198 struct spu_priv1 __iomem *priv1 = spu->priv1;
199
200 /* Save, Step 10: 193 /* Save, Step 10:
201 * Save MFC_SR1 in the CSA. 194 * Save MFC_SR1 in the CSA.
202 */ 195 */
203 csa->priv1.mfc_sr1_RW = in_be64(&priv1->mfc_sr1_RW); 196 csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
204} 197}
205 198
206static inline void save_spu_status(struct spu_state *csa, struct spu *spu) 199static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
@@ -292,15 +285,13 @@ static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
292 285
293static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu) 286static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
294{ 287{
295 struct spu_priv1 __iomem *priv1 = spu->priv1;
296
297 /* Save, Step 17: 288 /* Save, Step 17:
298 * Restore, Step 12. 289 * Restore, Step 12.
299 * Restore, Step 48. 290 * Restore, Step 48.
300 * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register. 291 * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
301 * Then issue a PPE sync instruction. 292 * Then issue a PPE sync instruction.
302 */ 293 */
303 out_be64(&priv1->tlb_invalidate_entry_W, 0UL); 294 spu_tlb_invalidate(spu);
304 mb(); 295 mb();
305} 296}
306 297
@@ -410,25 +401,21 @@ static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
410 401
411static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 402static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
412{ 403{
413 struct spu_priv1 __iomem *priv1 = spu->priv1;
414
415 /* Save, Step 25: 404 /* Save, Step 25:
416 * Save the MFC_TCLASS_ID register in 405 * Save the MFC_TCLASS_ID register in
417 * the CSA. 406 * the CSA.
418 */ 407 */
419 csa->priv1.mfc_tclass_id_RW = in_be64(&priv1->mfc_tclass_id_RW); 408 csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
420} 409}
421 410
422static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 411static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
423{ 412{
424 struct spu_priv1 __iomem *priv1 = spu->priv1;
425
426 /* Save, Step 26: 413 /* Save, Step 26:
427 * Restore, Step 23. 414 * Restore, Step 23.
428 * Write the MFC_TCLASS_ID register with 415 * Write the MFC_TCLASS_ID register with
429 * the value 0x10000000. 416 * the value 0x10000000.
430 */ 417 */
431 out_be64(&priv1->mfc_tclass_id_RW, 0x10000000); 418 spu_mfc_tclass_id_set(spu, 0x10000000);
432 eieio(); 419 eieio();
433} 420}
434 421
@@ -458,14 +445,13 @@ static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
458 445
459static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu) 446static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
460{ 447{
461 struct spu_priv1 __iomem *priv1 = spu->priv1;
462 struct spu_priv2 __iomem *priv2 = spu->priv2; 448 struct spu_priv2 __iomem *priv2 = spu->priv2;
463 int i; 449 int i;
464 450
465 /* Save, Step 29: 451 /* Save, Step 29:
466 * If MFC_SR1[R]='1', save SLBs in CSA. 452 * If MFC_SR1[R]='1', save SLBs in CSA.
467 */ 453 */
468 if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) { 454 if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
469 csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W); 455 csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
470 for (i = 0; i < 8; i++) { 456 for (i = 0; i < 8; i++) {
471 out_be64(&priv2->slb_index_W, i); 457 out_be64(&priv2->slb_index_W, i);
@@ -479,8 +465,6 @@ static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
479 465
480static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu) 466static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
481{ 467{
482 struct spu_priv1 __iomem *priv1 = spu->priv1;
483
484 /* Save, Step 30: 468 /* Save, Step 30:
485 * Restore, Step 18: 469 * Restore, Step 18:
486 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and 470 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
@@ -492,9 +476,9 @@ static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
492 * MFC_SR1[Pr] bit is not set. 476 * MFC_SR1[Pr] bit is not set.
493 * 477 *
494 */ 478 */
495 out_be64(&priv1->mfc_sr1_RW, (MFC_STATE1_MASTER_RUN_CONTROL_MASK | 479 spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
496 MFC_STATE1_RELOCATE_MASK | 480 MFC_STATE1_RELOCATE_MASK |
497 MFC_STATE1_BUS_TLBIE_MASK)); 481 MFC_STATE1_BUS_TLBIE_MASK));
498} 482}
499 483
500static inline void save_spu_npc(struct spu_state *csa, struct spu *spu) 484static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
@@ -571,16 +555,14 @@ static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
571 555
572static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu) 556static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
573{ 557{
574 struct spu_priv1 __iomem *priv1 = spu->priv1;
575
576 /* Save, Step 38: 558 /* Save, Step 38:
577 * Save RA_GROUP_ID register and the 559 * Save RA_GROUP_ID register and the
578 * RA_ENABLE register in the CSA. 560 * RA_ENABLE register in the CSA.
579 */ 561 */
580 csa->priv1.resource_allocation_groupID_RW = 562 csa->priv1.resource_allocation_groupID_RW =
581 in_be64(&priv1->resource_allocation_groupID_RW); 563 spu_resource_allocation_groupID_get(spu);
582 csa->priv1.resource_allocation_enable_RW = 564 csa->priv1.resource_allocation_enable_RW =
583 in_be64(&priv1->resource_allocation_enable_RW); 565 spu_resource_allocation_enable_get(spu);
584} 566}
585 567
586static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu) 568static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
@@ -698,14 +680,13 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
698 680
699static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu) 681static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
700{ 682{
701 struct spu_priv1 __iomem *priv1 = spu->priv1;
702 struct spu_priv2 __iomem *priv2 = spu->priv2; 683 struct spu_priv2 __iomem *priv2 = spu->priv2;
703 684
704 /* Save, Step 45: 685 /* Save, Step 45:
705 * Restore, Step 19: 686 * Restore, Step 19:
706 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All. 687 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
707 */ 688 */
708 if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) { 689 if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
709 out_be64(&priv2->slb_invalidate_all_W, 0UL); 690 out_be64(&priv2->slb_invalidate_all_W, 0UL);
710 eieio(); 691 eieio();
711 } 692 }
@@ -774,7 +755,6 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
774 755
775static inline void enable_interrupts(struct spu_state *csa, struct spu *spu) 756static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
776{ 757{
777 struct spu_priv1 __iomem *priv1 = spu->priv1;
778 unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR | 758 unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
779 CLASS1_ENABLE_STORAGE_FAULT_INTR; 759 CLASS1_ENABLE_STORAGE_FAULT_INTR;
780 760
@@ -787,12 +767,12 @@ static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
787 * (translation) interrupts. 767 * (translation) interrupts.
788 */ 768 */
789 spin_lock_irq(&spu->register_lock); 769 spin_lock_irq(&spu->register_lock);
790 out_be64(&priv1->int_stat_class0_RW, ~(0UL)); 770 spu_int_stat_clear(spu, 0, ~0ul);
791 out_be64(&priv1->int_stat_class1_RW, ~(0UL)); 771 spu_int_stat_clear(spu, 1, ~0ul);
792 out_be64(&priv1->int_stat_class2_RW, ~(0UL)); 772 spu_int_stat_clear(spu, 2, ~0ul);
793 out_be64(&priv1->int_mask_class0_RW, 0UL); 773 spu_int_mask_set(spu, 0, 0ul);
794 out_be64(&priv1->int_mask_class1_RW, class1_mask); 774 spu_int_mask_set(spu, 1, class1_mask);
795 out_be64(&priv1->int_mask_class2_RW, 0UL); 775 spu_int_mask_set(spu, 2, 0ul);
796 spin_unlock_irq(&spu->register_lock); 776 spin_unlock_irq(&spu->register_lock);
797} 777}
798 778
@@ -930,7 +910,6 @@ static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
930 910
931static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu) 911static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
932{ 912{
933 struct spu_priv1 __iomem *priv1 = spu->priv1;
934 struct spu_problem __iomem *prob = spu->problem; 913 struct spu_problem __iomem *prob = spu->problem;
935 u32 mask = MFC_TAGID_TO_TAGMASK(0); 914 u32 mask = MFC_TAGID_TO_TAGMASK(0);
936 unsigned long flags; 915 unsigned long flags;
@@ -947,14 +926,13 @@ static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
947 POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask); 926 POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
948 927
949 local_irq_save(flags); 928 local_irq_save(flags);
950 out_be64(&priv1->int_stat_class0_RW, ~(0UL)); 929 spu_int_stat_clear(spu, 0, ~(0ul));
951 out_be64(&priv1->int_stat_class2_RW, ~(0UL)); 930 spu_int_stat_clear(spu, 2, ~(0ul));
952 local_irq_restore(flags); 931 local_irq_restore(flags);
953} 932}
954 933
955static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu) 934static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
956{ 935{
957 struct spu_priv1 __iomem *priv1 = spu->priv1;
958 struct spu_problem __iomem *prob = spu->problem; 936 struct spu_problem __iomem *prob = spu->problem;
959 unsigned long flags; 937 unsigned long flags;
960 938
@@ -967,8 +945,8 @@ static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
967 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); 945 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
968 946
969 local_irq_save(flags); 947 local_irq_save(flags);
970 out_be64(&priv1->int_stat_class0_RW, ~(0UL)); 948 spu_int_stat_clear(spu, 0, ~(0ul));
971 out_be64(&priv1->int_stat_class2_RW, ~(0UL)); 949 spu_int_stat_clear(spu, 2, ~(0ul));
972 local_irq_restore(flags); 950 local_irq_restore(flags);
973} 951}
974 952
@@ -1067,7 +1045,6 @@ static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
1067static inline void clear_spu_status(struct spu_state *csa, struct spu *spu) 1045static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1068{ 1046{
1069 struct spu_problem __iomem *prob = spu->problem; 1047 struct spu_problem __iomem *prob = spu->problem;
1070 struct spu_priv1 __iomem *priv1 = spu->priv1;
1071 1048
1072 /* Restore, Step 10: 1049 /* Restore, Step 10:
1073 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1, 1050 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
@@ -1076,8 +1053,8 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1076 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { 1053 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
1077 if (in_be32(&prob->spu_status_R) & 1054 if (in_be32(&prob->spu_status_R) &
1078 SPU_STATUS_ISOLATED_EXIT_STAUTUS) { 1055 SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
1079 out_be64(&priv1->mfc_sr1_RW, 1056 spu_mfc_sr1_set(spu,
1080 MFC_STATE1_MASTER_RUN_CONTROL_MASK); 1057 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1081 eieio(); 1058 eieio();
1082 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); 1059 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
1083 eieio(); 1060 eieio();
@@ -1088,8 +1065,8 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1088 SPU_STATUS_ISOLATED_LOAD_STAUTUS) 1065 SPU_STATUS_ISOLATED_LOAD_STAUTUS)
1089 || (in_be32(&prob->spu_status_R) & 1066 || (in_be32(&prob->spu_status_R) &
1090 SPU_STATUS_ISOLATED_STATE)) { 1067 SPU_STATUS_ISOLATED_STATE)) {
1091 out_be64(&priv1->mfc_sr1_RW, 1068 spu_mfc_sr1_set(spu,
1092 MFC_STATE1_MASTER_RUN_CONTROL_MASK); 1069 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1093 eieio(); 1070 eieio();
1094 out_be32(&prob->spu_runcntl_RW, 0x2); 1071 out_be32(&prob->spu_runcntl_RW, 0x2);
1095 eieio(); 1072 eieio();
@@ -1257,16 +1234,14 @@ static inline void setup_spu_status_part2(struct spu_state *csa,
1257 1234
1258static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu) 1235static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
1259{ 1236{
1260 struct spu_priv1 __iomem *priv1 = spu->priv1;
1261
1262 /* Restore, Step 29: 1237 /* Restore, Step 29:
1263 * Restore RA_GROUP_ID register and the 1238 * Restore RA_GROUP_ID register and the
1264 * RA_ENABLE register from the CSA. 1239 * RA_ENABLE register from the CSA.
1265 */ 1240 */
1266 out_be64(&priv1->resource_allocation_groupID_RW, 1241 spu_resource_allocation_groupID_set(spu,
1267 csa->priv1.resource_allocation_groupID_RW); 1242 csa->priv1.resource_allocation_groupID_RW);
1268 out_be64(&priv1->resource_allocation_enable_RW, 1243 spu_resource_allocation_enable_set(spu,
1269 csa->priv1.resource_allocation_enable_RW); 1244 csa->priv1.resource_allocation_enable_RW);
1270} 1245}
1271 1246
1272static inline void send_restore_code(struct spu_state *csa, struct spu *spu) 1247static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
@@ -1409,8 +1384,6 @@ static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
1409 1384
1410static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) 1385static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
1411{ 1386{
1412 struct spu_priv1 __iomem *priv1 = spu->priv1;
1413
1414 /* Restore, Step 49: 1387 /* Restore, Step 49:
1415 * Write INT_MASK_class0 with value of 0. 1388 * Write INT_MASK_class0 with value of 0.
1416 * Write INT_MASK_class1 with value of 0. 1389 * Write INT_MASK_class1 with value of 0.
@@ -1420,12 +1393,12 @@ static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
1420 * Write INT_STAT_class2 with value of -1. 1393 * Write INT_STAT_class2 with value of -1.
1421 */ 1394 */
1422 spin_lock_irq(&spu->register_lock); 1395 spin_lock_irq(&spu->register_lock);
1423 out_be64(&priv1->int_mask_class0_RW, 0UL); 1396 spu_int_mask_set(spu, 0, 0ul);
1424 out_be64(&priv1->int_mask_class1_RW, 0UL); 1397 spu_int_mask_set(spu, 1, 0ul);
1425 out_be64(&priv1->int_mask_class2_RW, 0UL); 1398 spu_int_mask_set(spu, 2, 0ul);
1426 out_be64(&priv1->int_stat_class0_RW, ~(0UL)); 1399 spu_int_stat_clear(spu, 0, ~0ul);
1427 out_be64(&priv1->int_stat_class1_RW, ~(0UL)); 1400 spu_int_stat_clear(spu, 1, ~0ul);
1428 out_be64(&priv1->int_stat_class2_RW, ~(0UL)); 1401 spu_int_stat_clear(spu, 2, ~0ul);
1429 spin_unlock_irq(&spu->register_lock); 1402 spin_unlock_irq(&spu->register_lock);
1430} 1403}
1431 1404
@@ -1522,12 +1495,10 @@ static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
1522 1495
1523static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) 1496static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
1524{ 1497{
1525 struct spu_priv1 __iomem *priv1 = spu->priv1;
1526
1527 /* Restore, Step 56: 1498 /* Restore, Step 56:
1528 * Restore the MFC_TCLASS_ID register from CSA. 1499 * Restore the MFC_TCLASS_ID register from CSA.
1529 */ 1500 */
1530 out_be64(&priv1->mfc_tclass_id_RW, csa->priv1.mfc_tclass_id_RW); 1501 spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
1531 eieio(); 1502 eieio();
1532} 1503}
1533 1504
@@ -1689,7 +1660,6 @@ static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
1689 1660
1690static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) 1661static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
1691{ 1662{
1692 struct spu_priv1 __iomem *priv1 = spu->priv1;
1693 struct spu_priv2 __iomem *priv2 = spu->priv2; 1663 struct spu_priv2 __iomem *priv2 = spu->priv2;
1694 u64 dummy = 0UL; 1664 u64 dummy = 0UL;
1695 1665
@@ -1700,8 +1670,7 @@ static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
1700 if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { 1670 if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
1701 dummy = in_be64(&priv2->puint_mb_R); 1671 dummy = in_be64(&priv2->puint_mb_R);
1702 eieio(); 1672 eieio();
1703 out_be64(&priv1->int_stat_class2_RW, 1673 spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
1704 CLASS2_ENABLE_MAILBOX_INTR);
1705 eieio(); 1674 eieio();
1706 } 1675 }
1707} 1676}
@@ -1729,12 +1698,10 @@ static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
1729 1698
1730static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) 1699static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
1731{ 1700{
1732 struct spu_priv1 __iomem *priv1 = spu->priv1;
1733
1734 /* Restore, Step 69: 1701 /* Restore, Step 69:
1735 * Restore the MFC_SR1 register from CSA. 1702 * Restore the MFC_SR1 register from CSA.
1736 */ 1703 */
1737 out_be64(&priv1->mfc_sr1_RW, csa->priv1.mfc_sr1_RW); 1704 spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
1738 eieio(); 1705 eieio();
1739} 1706}
1740 1707
@@ -1792,15 +1759,13 @@ static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
1792 1759
1793static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) 1760static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
1794{ 1761{
1795 struct spu_priv1 __iomem *priv1 = spu->priv1;
1796
1797 /* Restore, Step 75: 1762 /* Restore, Step 75:
1798 * Re-enable SPU interrupts. 1763 * Re-enable SPU interrupts.
1799 */ 1764 */
1800 spin_lock_irq(&spu->register_lock); 1765 spin_lock_irq(&spu->register_lock);
1801 out_be64(&priv1->int_mask_class0_RW, csa->priv1.int_mask_class0_RW); 1766 spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
1802 out_be64(&priv1->int_mask_class1_RW, csa->priv1.int_mask_class1_RW); 1767 spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
1803 out_be64(&priv1->int_mask_class2_RW, csa->priv1.int_mask_class2_RW); 1768 spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
1804 spin_unlock_irq(&spu->register_lock); 1769 spin_unlock_irq(&spu->register_lock);
1805} 1770}
1806 1771