 -rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c | 2042
 -rw-r--r--  include/asm-powerpc/spu_csa.h              |    1
 2 files changed, 2036 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6804342e99c3..70345b0524fc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -52,6 +52,2019 @@
#include "spu_save_dump.h"
#include "spu_restore_dump.h"

#if 0
#define POLL_WHILE_TRUE(_c) { \
	do { \
	} while (_c); \
}
#else
#define RELAX_SPIN_COUNT 1000
#define POLL_WHILE_TRUE(_c) { \
	do { \
		int _i; \
		for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
			cpu_relax(); \
		} \
		if (unlikely(_c)) yield(); \
		else break; \
	} while (_c); \
}
#endif				/* debug */

#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))

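/* Usage sketch (illustrative only): the pattern used throughout this
 * file is to poll a status register until a condition clears, e.g.
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
 *			SPU_STATUS_RUNNING);
 *
 * which spins with cpu_relax() for up to RELAX_SPIN_COUNT iterations,
 * then falls back to yield() while the condition still holds.
 * POLL_WHILE_FALSE(_c) simply polls until _c becomes true.
 */
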
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 * Acquire SPU-specific mutual exclusion lock.
	 * TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 * Release SPU-specific mutual exclusion lock.
	 * TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 * If SPU_Status[E,L,IS] any field is '1', this
	 * SPU is in isolate state and cannot be context
	 * saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Save, Step 3:
	 * Restore, Step 2:
	 * Save INT_Mask_class0 in CSA.
	 * Write INT_MASK_class0 with value of 0.
	 * Save INT_Mask_class1 in CSA.
	 * Write INT_MASK_class1 with value of 0.
	 * Save INT_Mask_class2 in CSA.
	 * Write INT_MASK_class2 with value of 0.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW =
		    in_be64(&priv1->int_mask_class0_RW);
		csa->priv1.int_mask_class1_RW =
		    in_be64(&priv1->int_mask_class1_RW);
		csa->priv1.int_mask_class2_RW =
		    in_be64(&priv1->int_mask_class2_RW);
	}
	out_be64(&priv1->int_mask_class0_RW, 0UL);
	out_be64(&priv1->int_mask_class1_RW, 0UL);
	out_be64(&priv1->int_mask_class2_RW, 0UL);
	eieio();
	spin_unlock_irq(&spu->register_lock);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 25.
	 * Set a software watchdog timer, which specifies the
	 * maximum allowable time for a context save sequence.
	 *
	 * For the present, this implementation will not set a global
	 * watchdog timer, as virtualization & variable system load
	 * may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 * Inhibit user-space access (if provided) to this
	 * SPU by unmapping the virtual pages assigned to
	 * the SPU memory-mapped I/O (MMIO) for problem
	 * state. TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 * Set a software context switch pending flag.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING_nr, &spu->flags);
	mb();
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 * Read and save MFC_CNTL[Ss].
	 */
	if (csa) {
		csa->priv2.mfc_control_RW = in_be64(&priv2->mfc_control_RW) &
		    MFC_CNTL_SUSPEND_DMA_STATUS_MASK;
	}
}

static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 * Save SPU_Runcntl in the CSA. This value contains
	 * the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Save, Step 10:
	 * Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = in_be64(&priv1->mfc_sr1_RW);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 * Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 12:
	 * Read MFC_CNTL[Ds]. Update saved copy of
	 * CSA.MFC_CNTL[Ds].
	 */
	if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
		csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
		csa->suspend_time = get_cycles();
		out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
		eieio();
		csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
		eieio();
	}
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 * Write MFC_CNTL[Dh] set to a '1' to halt
	 * the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 * Read PPE Timebase High and Timebase low registers
	 * and save in CSA. TBD.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 * Remove other SPU access to this SPU by unmapping
	 * this SPU's pages from their address space. TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 * Write SPU_MSSync register. Poll SPU_MSSync[P]
	 * for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 * Then issue a PPE sync instruction.
	 */
	out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 * Handle any pending interrupts from this SPU
	 * here. This is OS or hypervisor specific. One
	 * option is to re-enable interrupts to handle any
	 * pending interrupts, with the interrupt handlers
	 * recognizing the software Context Switch Pending
	 * flag, to ensure the SPU execution or MFC command
	 * queue is not restarted. TBD.
	 */
}

static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 * If MFC_Cntl[Se]=0 then save
	 * MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 * Save the PPU_QueryMask register
	 * in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 * Save the PPU_QueryType register
	 * in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 * Save the MFC_CSR_TSQ register
	 * in the LSCSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 * registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 * Save the MFC_CSR_ATO register in
	 * the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Save, Step 25:
	 * Save the MFC_TCLASS_ID register in
	 * the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = in_be64(&priv1->mfc_tclass_id_RW);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Save, Step 26:
	 * Restore, Step 23.
	 * Write the MFC_TCLASS_ID register with
	 * the value 0x10000000.
	 */
	out_be64(&priv1->mfc_tclass_id_RW, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 * Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Poll MFC_CNTL[Ps] until value '11' is read
	 * (purge complete).
	 */
	POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 29:
	 * If MFC_SR1[R]='1', save SLBs in CSA.
	 */
	if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
		csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->slb_index_W, i);
			eieio();
			csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
			csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
			eieio();
		}
	}
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Save, Step 30:
	 * Restore, Step 18:
	 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 * MFC_SR1[TL,R,Pr,T] set correctly for the
	 * OS specific environment.
	 *
	 * Implementation note: The SPU-side code
	 * for save/restore is privileged, so the
	 * MFC_SR1[Pr] bit is not set.
	 *
	 */
	out_be64(&priv1->mfc_sr1_RW, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
				      MFC_STATE1_RELOCATE_MASK |
				      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 * Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 * Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 * Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 * Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 * Reset SPU_LSLR.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 * Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 * Save PM_Trace_Tag_Wait_Mask in the CSA.
	 * Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Save, Step 38:
	 * Save RA_GROUP_ID register and the
	 * RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    in_be64(&priv1->resource_allocation_groupID_RW);
	csa->priv1.resource_allocation_enable_RW =
	    in_be64(&priv1->resource_allocation_enable_RW);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 * Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 * Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 * Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 * Save the following CH: [0,1,3,4,24,25,27]
	 */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 * Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->pu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 * Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 * Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 * Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 45:
	 * Restore, Step 19:
	 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
	 */
	if (in_be64(&priv1->mfc_sr1_RW) & MFC_STATE1_RELOCATE_MASK) {
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
		eieio();
	}
}

static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
	slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;

	/* Large pages are used for kernel text/data, but not vmalloc. */
	if (cpu_has_feature(CPU_FTR_16M_PAGE)
	    && REGION_ID(ea) == KERNEL_REGION_ID)
		slb[0] |= SLB_VSID_L;
}
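
/* Worked example (illustrative; the exact mask values live in the MMU
 * headers): for a kernel-linear-mapping address such as
 * ea = 0xc000000000050000, slb[1] becomes the ESID bits of ea with
 * SLB_ESID_V set, and slb[0] becomes get_kernel_vsid(ea) shifted into
 * the VSID field with SLB_VSID_KERNEL protection; SLB_VSID_L is OR'd
 * in only when the kernel region is backed by 16MB pages.
 */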

static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->slb_index_W, slbe);
	eieio();
	out_be64(&priv2->slb_vsid_RW, slb[0]);
	out_be64(&priv2->slb_esid_RW, slb[1]);
	eieio();
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	u64 code_slb[2];
	u64 lscsa_slb[2];

	/* Save, Step 47:
	 * Restore, Step 30.
	 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 * register, then initialize SLB_VSID and SLB_ESID
	 * to provide access to SPU context save code and
	 * LSCSA.
	 *
	 * This implementation places both the context
	 * switch code and LSCSA in kernel address space.
	 *
	 * Further this implementation assumes that the
	 * MFC_SR1[R]=1 (in other words, assume that
	 * translation is desired by OS environment).
	 */
	invalidate_slbs(csa, spu);
	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
	load_mfc_slb(spu, code_slb, 0);
	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
		load_mfc_slb(spu, lscsa_slb, 1);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 * Change the software context switch pending flag
	 * to context switch active.
	 */
	set_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags);
	clear_bit(SPU_CONTEXT_SWITCH_PENDING_nr, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 * Reset and then enable interrupts, as
	 * needed by OS.
	 *
	 * This implementation enables only class1
	 * (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	out_be64(&priv1->int_stat_class0_RW, ~(0UL));
	out_be64(&priv1->int_stat_class1_RW, ~(0UL));
	out_be64(&priv1->int_stat_class2_RW, ~(0UL));
	out_be64(&priv1->int_mask_class0_RW, 0UL);
	out_be64(&priv1->int_mask_class1_RW, class1_mask);
	out_be64(&priv1->int_mask_class2_RW, 0UL);
	spin_unlock_irq(&spu->register_lock);
}

static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}
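
/* Example of the chunking above (illustrative; assumes MFC_MAX_DMA_SIZE
 * is 16KB): a 20KB transfer such as
 *
 *	send_mfc_dma(spu, addr, 0x0, 20480, 0, 0, MFC_PUT_CMD);
 *
 * is issued as one 16384-byte DMA at (addr, ls_offset 0x0) followed by
 * one 4096-byte DMA at (addr + 16384, ls_offset 0x4000), since ea and
 * ls_offset advance by transfer_size after each command.
 */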

static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 * Issue a DMA command to copy the first 16K bytes
	 * of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 * point address of context save code in local
	 * storage.
	 *
	 * This implementation uses SPU-side save/restore
	 * programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 * Write SPU_Sig_Notify_1 register with upper 32-bits
	 * of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
	eieio();
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 * Write SPU_Sig_Notify_2 register with lower 32-bits
	 * of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
	eieio();
}
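
/* Illustration: set_signot1/set_signot2 hand the 64-bit LSCSA effective
 * address to the SPU-side code one word at a time.  For a hypothetical
 * csa->lscsa of 0x0000000012345678, addr64.ui[0] is 0x00000000 (written
 * to SPU_Sig_Notify_1) and addr64.ui[1] is 0x12345678 (written to
 * SPU_Sig_Notify_2), since the PPE is big-endian.
 */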

static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 * Issue a DMA command to copy context save code
	 * to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 * Write PPU_QueryMask=1 (enable Tag Group 0)
	 * and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 46.
	 * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 * or write PPU_QueryType[TS]=01 and wait for Tag Group
	 * Complete Interrupt. Write INT_Stat_Class0 or
	 * INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	out_be64(&priv1->int_stat_class0_RW, ~(0UL));
	out_be64(&priv1->int_stat_class2_RW, ~(0UL));
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 * Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 * or SPU Class 2 interrupt. Write INT_Stat_class0
	 * or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	out_be64(&priv1->int_stat_class0_RW, ~(0UL));
	out_be64(&priv1->int_stat_class2_RW, ~(0UL));
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 58:
	 * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 * context save succeeded, otherwise context save
	 * failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
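
/* Illustration (assuming SPU_STOP_STATUS_SHIFT is 16 and
 * SPU_STATUS_STOPPED_BY_STOP is 0x2): a successful save leaves
 * SPU_Status reading exactly (SPU_SAVE_COMPLETE << 16) | 0x2, i.e. the
 * save program's stop-and-signal code in the upper halfword with the
 * "stopped by stop" bit set; any other value is reported as a failed
 * save.
 */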

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 * If required, notify the "using application" that
	 * the SPU task has been terminated. TBD.
	 */
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 * Restore, Step 47.
	 * Write MFC_Cntl[Dh,Sc]='1','1' to suspend
	 * the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 * Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 * If SPU_Status[R]=1, stop SPU execution
	 * and wait for stop to complete.
	 *
	 * Returns 1 if SPU_Status[R]=1 on entry.
	 * 0 otherwise
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Restore, Step 10:
	 * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 * release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			out_be64(&priv1->mfc_sr1_RW,
				 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be64(&priv1->mfc_sr1_RW,
				 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 * Reset the following CH: [0,1,3,4,24,25,27]
	 */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 * Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 * instruction sequence to the end of the SPU based restore
	 * code (after the "context restored" stop and signal) to
	 * restore the correct SPU status.
	 *
	 * NOTE: Rather than modifying the SPU executable, we
	 * instead add a new 'stopped_status' field to the
	 * LSCSA.  The SPU-side restore reads this field and
	 * takes the appropriate action when exiting.
	 */

	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 *
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}
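
/* Illustration: if the context had stopped at a stop-and-signal
 * instruction with code 0x1234, CSA.SPU_Status holds
 * (0x1234 << SPU_STOP_STATUS_SHIFT) | SPU_STATUS_STOPPED_BY_STOP, so
 * status_code above extracts 0x1234 and stopped_status.slot[1] passes
 * it to the SPU-side restore program, which re-creates the equivalent
 * stop condition when it exits.
 */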

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 * If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 * add a 'br *' instruction to the end of
	 * the SPU based restore code.
	 *
	 * NOTE: Rather than modifying the SPU executable, we
	 * instead add a new 'stopped_status' field to the
	 * LSCSA.  The SPU-side restore reads this field and
	 * takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv1 __iomem *priv1 = spu->priv1;

	/* Restore, Step 29:
	 * Restore RA_GROUP_ID register and the
	 * RA_ENABLE register from the CSA.
	 */
	out_be64(&priv1->resource_allocation_groupID_RW,
		 csa->priv1.resource_allocation_groupID_RW);
	out_be64(&priv1->resource_allocation_enable_RW,
		 csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 * Issue MFC DMA command to copy context
	 * restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 * If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 * running) then adjust decrementer, set
	 * decrementer running status in LSCSA,
	 * and set decrementer "wrapped" status
	 * in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr.slot[0] = delta_time;
	}
}
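
/* Illustration: if the context was suspended at timebase count T0
 * (csa->suspend_time, recorded in Save Steps 12/14) and is resumed at
 * count T1, decr.slot[0] receives T1 - T0, which the SPU-side restore
 * code can use to account for the time the context spent switched out
 * when it reloads the decrementer.
 */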
1304 | |||
1305 | static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu) | ||
1306 | { | ||
1307 | /* Restore, Step 35: | ||
1308 | * Copy the CSA.PU_MB data into the LSCSA. | ||
1309 | */ | ||
1310 | csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R; | ||
1311 | } | ||
1312 | |||
1313 | static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu) | ||
1314 | { | ||
1315 | /* Restore, Step 36: | ||
1316 | * Copy the CSA.PUINT_MB data into the LSCSA. | ||
1317 | */ | ||
1318 | csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R; | ||
1319 | } | ||
1320 | |||
1321 | static inline int check_restore_status(struct spu_state *csa, struct spu *spu) | ||
1322 | { | ||
1323 | struct spu_problem __iomem *prob = spu->problem; | ||
1324 | u32 complete; | ||
1325 | |||
1326 | /* Restore, Step 40: | ||
1327 | * If SPU_Status[P]=1 and SPU_Status[SC] = "success", | ||
1328 | * context restore succeeded, otherwise context restore | ||
1329 | * failed. | ||
1330 | */ | ||
1331 | complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) | | ||
1332 | SPU_STATUS_STOPPED_BY_STOP); | ||
1333 | return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; | ||
1334 | } | ||
1335 | |||
1336 | static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu) | ||
1337 | { | ||
1338 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1339 | |||
1340 | /* Restore, Step 41: | ||
1341 | * Restore SPU_PrivCntl from the CSA. | ||
1342 | */ | ||
1343 | out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW); | ||
1344 | eieio(); | ||
1345 | } | ||
1346 | |||
1347 | static inline void restore_status_part1(struct spu_state *csa, struct spu *spu) | ||
1348 | { | ||
1349 | struct spu_problem __iomem *prob = spu->problem; | ||
1350 | u32 mask; | ||
1351 | |||
1352 | /* Restore, Step 42: | ||
1353 | * If any CSA.SPU_Status[I,S,H,P]=1, then | ||
1354 | * restore the error or single step state. | ||
1355 | */ | ||
1356 | mask = SPU_STATUS_INVALID_INSTR | | ||
1357 | SPU_STATUS_SINGLE_STEP | | ||
1358 | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; | ||
1359 | if (csa->prob.spu_status_R & mask) { | ||
1360 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); | ||
1361 | eieio(); | ||
1362 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1363 | SPU_STATUS_RUNNING); | ||
1364 | } | ||
1365 | } | ||
1366 | |||
1367 | static inline void restore_status_part2(struct spu_state *csa, struct spu *spu) | ||
1368 | { | ||
1369 | struct spu_problem __iomem *prob = spu->problem; | ||
1370 | u32 mask; | ||
1371 | |||
1372 | /* Restore, Step 43: | ||
1373 | * If all CSA.SPU_Status[I,S,H,P,R]=0 then write | ||
1374 | * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1, | ||
1375 | * then write '00' to SPU_RunCntl[R0R1] and wait | ||
1376 | * for SPU_Status[R]=0. | ||
1377 | */ | ||
1378 | mask = SPU_STATUS_INVALID_INSTR | | ||
1379 | SPU_STATUS_SINGLE_STEP | | ||
1380 | SPU_STATUS_STOPPED_BY_HALT | | ||
1381 | SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; | ||
1382 | if (!(csa->prob.spu_status_R & mask)) { | ||
1383 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); | ||
1384 | eieio(); | ||
1385 | POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) & | ||
1386 | SPU_STATUS_RUNNING); | ||
1387 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); | ||
1388 | eieio(); | ||
1389 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1390 | SPU_STATUS_RUNNING); | ||
1391 | } | ||
1392 | } | ||
1393 | |||
1394 | static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu) | ||
1395 | { | ||
1396 | unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; | ||
1397 | unsigned int ls_offset = 0x0; | ||
1398 | unsigned int size = 16384; | ||
1399 | unsigned int tag = 0; | ||
1400 | unsigned int rclass = 0; | ||
1401 | unsigned int cmd = MFC_GET_CMD; | ||
1402 | |||
1403 | /* Restore, Step 44: | ||
1404 | * Issue a DMA command to restore the first | ||
1405 | * 16kb of local storage from CSA. | ||
1406 | */ | ||
1407 | send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); | ||
1408 | } | ||
1409 | |||
1410 | static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) | ||
1411 | { | ||
1412 | struct spu_priv1 __iomem *priv1 = spu->priv1; | ||
1413 | |||
1414 | /* Restore, Step 49: | ||
1415 | * Write INT_MASK_class0 with value of 0. | ||
1416 | * Write INT_MASK_class1 with value of 0. | ||
1417 | * Write INT_MASK_class2 with value of 0. | ||
1418 | * Write INT_STAT_class0 with value of -1. | ||
1419 | * Write INT_STAT_class1 with value of -1. | ||
1420 | * Write INT_STAT_class2 with value of -1. | ||
1421 | */ | ||
1422 | spin_lock_irq(&spu->register_lock); | ||
1423 | out_be64(&priv1->int_mask_class0_RW, 0UL); | ||
1424 | out_be64(&priv1->int_mask_class1_RW, 0UL); | ||
1425 | out_be64(&priv1->int_mask_class2_RW, 0UL); | ||
1426 | out_be64(&priv1->int_stat_class0_RW, ~(0UL)); | ||
1427 | out_be64(&priv1->int_stat_class1_RW, ~(0UL)); | ||
1428 | out_be64(&priv1->int_stat_class2_RW, ~(0UL)); | ||
1429 | spin_unlock_irq(&spu->register_lock); | ||
1430 | } | ||
1431 | |||
1432 | static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu) | ||
1433 | { | ||
1434 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1435 | int i; | ||
1436 | |||
1437 | /* Restore, Step 50: | ||
1438 | * If MFC_Cntl[Se]!=0 then restore | ||
1439 | * MFC command queues. | ||
1440 | */ | ||
1441 | if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) { | ||
1442 | for (i = 0; i < 8; i++) { | ||
1443 | out_be64(&priv2->puq[i].mfc_cq_data0_RW, | ||
1444 | csa->priv2.puq[i].mfc_cq_data0_RW); | ||
1445 | out_be64(&priv2->puq[i].mfc_cq_data1_RW, | ||
1446 | csa->priv2.puq[i].mfc_cq_data1_RW); | ||
1447 | out_be64(&priv2->puq[i].mfc_cq_data2_RW, | ||
1448 | csa->priv2.puq[i].mfc_cq_data2_RW); | ||
1449 | out_be64(&priv2->puq[i].mfc_cq_data3_RW, | ||
1450 | csa->priv2.puq[i].mfc_cq_data3_RW); | ||
1451 | } | ||
1452 | for (i = 0; i < 16; i++) { | ||
1453 | out_be64(&priv2->spuq[i].mfc_cq_data0_RW, | ||
1454 | csa->priv2.spuq[i].mfc_cq_data0_RW); | ||
1455 | out_be64(&priv2->spuq[i].mfc_cq_data1_RW, | ||
1456 | csa->priv2.spuq[i].mfc_cq_data1_RW); | ||
1457 | out_be64(&priv2->spuq[i].mfc_cq_data2_RW, | ||
1458 | csa->priv2.spuq[i].mfc_cq_data2_RW); | ||
1459 | out_be64(&priv2->spuq[i].mfc_cq_data3_RW, | ||
1460 | csa->priv2.spuq[i].mfc_cq_data3_RW); | ||
1461 | } | ||
1462 | } | ||
1463 | eieio(); | ||
1464 | } | ||
1465 | |||
1466 | static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu) | ||
1467 | { | ||
1468 | struct spu_problem __iomem *prob = spu->problem; | ||
1469 | |||
1470 | /* Restore, Step 51: | ||
1471 | * Restore the PPU_QueryMask register from CSA. | ||
1472 | */ | ||
1473 | out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW); | ||
1474 | eieio(); | ||
1475 | } | ||
1476 | |||
1477 | static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu) | ||
1478 | { | ||
1479 | struct spu_problem __iomem *prob = spu->problem; | ||
1480 | |||
1481 | /* Restore, Step 52: | ||
1482 | * Restore the PPU_QueryType register from CSA. | ||
1483 | */ | ||
1484 | out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW); | ||
1485 | eieio(); | ||
1486 | } | ||
1487 | |||
1488 | static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) | ||
1489 | { | ||
1490 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1491 | |||
1492 | /* Restore, Step 53: | ||
1493 | * Restore the MFC_CSR_TSQ register from CSA. | ||
1494 | */ | ||
1495 | out_be64(&priv2->spu_tag_status_query_RW, | ||
1496 | csa->priv2.spu_tag_status_query_RW); | ||
1497 | eieio(); | ||
1498 | } | ||
1499 | |||
1500 | static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) | ||
1501 | { | ||
1502 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1503 | |||
1504 | /* Restore, Step 54: | ||
1505 | * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2 | ||
1506 | * registers from CSA. | ||
1507 | */ | ||
1508 | out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW); | ||
1509 | out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW); | ||
1510 | eieio(); | ||
1511 | } | ||
1512 | |||
1513 | static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu) | ||
1514 | { | ||
1515 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1516 | |||
1517 | /* Restore, Step 55: | ||
1518 | * Restore the MFC_CSR_ATO register from CSA. | ||
1519 | */ | ||
1520 | out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW); | ||
1521 | } | ||
1522 | |||
1523 | static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) | ||
1524 | { | ||
1525 | struct spu_priv1 __iomem *priv1 = spu->priv1; | ||
1526 | |||
1527 | /* Restore, Step 56: | ||
1528 | * Restore the MFC_TCLASS_ID register from CSA. | ||
1529 | */ | ||
1530 | out_be64(&priv1->mfc_tclass_id_RW, csa->priv1.mfc_tclass_id_RW); | ||
1531 | eieio(); | ||
1532 | } | ||
1533 | |||
1534 | static inline void set_llr_event(struct spu_state *csa, struct spu *spu) | ||
1535 | { | ||
1536 | u64 ch0_cnt, ch0_data; | ||
1537 | u64 ch1_data; | ||
1538 | |||
1539 | /* Restore, Step 57: | ||
1540 | * Set the Lock Line Reservation Lost Event by: | ||
1541 | * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1. | ||
1542 | * 2. If CSA.SPU_Channel_0_Count=0 and | ||
1543 | * CSA.SPU_Wr_Event_Mask[Lr]=1 and | ||
1544 | * CSA.SPU_Event_Status[Lr]=0 then set | ||
1545 | * CSA.SPU_Event_Status_Count=1. | ||
1546 | */ | ||
1547 | ch0_cnt = csa->spu_chnlcnt_RW[0]; | ||
1548 | ch0_data = csa->spu_chnldata_RW[0]; | ||
1549 | ch1_data = csa->spu_chnldata_RW[1]; | ||
1550 | csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT; | ||
1551 | if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) && | ||
1552 | (ch1_data & MFC_LLR_LOST_EVENT)) { | ||
1553 | csa->spu_chnlcnt_RW[0] = 1; | ||
1554 | } | ||
1555 | } | ||
1556 | |||
1557 | static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu) | ||
1558 | { | ||
1559 | /* Restore, Step 58: | ||
1560 | * If the status of the CSA software decrementer | ||
1561 | * "wrapped" flag is set, OR in a '1' to | ||
1562 | * CSA.SPU_Event_Status[Tm]. | ||
1563 | */ | ||
1564 | if (csa->lscsa->decr_status.slot[0] == 1) { | ||
1565 | /* Test the count before Tm is set below. */ | ||
1566 | if ((csa->spu_chnlcnt_RW[0] == 0) && | ||
1567 | (csa->spu_chnldata_RW[1] & 0x20) && | ||
1568 | !(csa->spu_chnldata_RW[0] & 0x20)) { | ||
1569 | csa->spu_chnlcnt_RW[0] = 1; | ||
1570 | } | ||
1571 | csa->spu_chnldata_RW[0] |= 0x20; | ||
1572 | } | ||
1573 | } | ||
1574 | |||
1575 | static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) | ||
1576 | { | ||
1577 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1578 | u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL }; | ||
1579 | int i; | ||
1580 | |||
1581 | /* Restore, Step 59: | ||
1582 | * Restore the following CH: [0,1,3,4,24,25,27] | ||
1583 | */ | ||
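| /* Channels are accessed indirectly: write the channel | ||
| * index to spu_chnlcntptr_RW, then restore the saved | ||
| * data and count through spu_chnldata_RW and spu_chnlcnt_RW. | ||
| */ | ||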
1584 | for (i = 0; i < 7; i++) { | ||
1585 | idx = ch_indices[i]; | ||
1586 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
1587 | eieio(); | ||
1588 | out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]); | ||
1589 | out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]); | ||
1590 | eieio(); | ||
1591 | } | ||
1592 | } | ||
1593 | |||
1594 | static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu) | ||
1595 | { | ||
1596 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1597 | u64 ch_indices[3] = { 9UL, 21UL, 23UL }; | ||
1598 | u64 ch_counts[3]; | ||
1599 | u64 idx; | ||
1600 | int i; | ||
1601 | |||
1602 | /* Restore, Step 60: | ||
1603 | * Restore the following CH: [9,21,23]. | ||
1604 | */ | ||
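| /* Channels 9 and 23 are write-only and are restored with | ||
| * a fixed count of one; channel 21 (the MFC command channel) | ||
| * gets its saved count back, which reflects the number of | ||
| * free slots in the MFC command queue. | ||
| */ | ||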
1605 | ch_counts[0] = 1UL; | ||
1606 | ch_counts[1] = csa->spu_chnlcnt_RW[21]; | ||
1607 | ch_counts[2] = 1UL; | ||
1608 | for (i = 0; i < 3; i++) { | ||
1609 | idx = ch_indices[i]; | ||
1610 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
1611 | eieio(); | ||
1612 | out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); | ||
1613 | eieio(); | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu) | ||
1618 | { | ||
1619 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1620 | |||
1621 | /* Restore, Step 61: | ||
1622 | * Restore the SPU_LSLR register from CSA. | ||
1623 | */ | ||
1624 | out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW); | ||
1625 | eieio(); | ||
1626 | } | ||
1627 | |||
1628 | static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu) | ||
1629 | { | ||
1630 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1631 | |||
1632 | /* Restore, Step 62: | ||
1633 | * Restore the SPU_Cfg register from CSA. | ||
1634 | */ | ||
1635 | out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW); | ||
1636 | eieio(); | ||
1637 | } | ||
1638 | |||
1639 | static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu) | ||
1640 | { | ||
1641 | /* Restore, Step 63: | ||
1642 | * Restore PM_Trace_Tag_Wait_Mask from CSA. | ||
1643 | * Not performed by this implementation. | ||
1644 | */ | ||
1645 | } | ||
1646 | |||
1647 | static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu) | ||
1648 | { | ||
1649 | struct spu_problem __iomem *prob = spu->problem; | ||
1650 | |||
1651 | /* Restore, Step 64: | ||
1652 | * Restore SPU_NPC from CSA. | ||
1653 | */ | ||
1654 | out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW); | ||
1655 | eieio(); | ||
1656 | } | ||
1657 | |||
1658 | static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu) | ||
1659 | { | ||
1660 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1661 | int i; | ||
1662 | |||
1663 | /* Restore, Step 65: | ||
1664 | * Restore MFC_RdSPU_MB from CSA. | ||
1665 | */ | ||
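| /* The SPU inbound mailbox (channel 29) is four entries | ||
| * deep, so all four saved slots are written back. | ||
| */ | ||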
1666 | out_be64(&priv2->spu_chnlcntptr_RW, 29UL); | ||
1667 | eieio(); | ||
1668 | out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); | ||
1669 | for (i = 0; i < 4; i++) { | ||
1670 | out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]); | ||
1671 | } | ||
1672 | eieio(); | ||
1673 | } | ||
1674 | |||
1675 | static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu) | ||
1676 | { | ||
1677 | struct spu_problem __iomem *prob = spu->problem; | ||
1678 | u32 dummy = 0; | ||
1679 | |||
1680 | /* Restore, Step 66: | ||
1681 | * If CSA.MB_Stat[P]=0 (mailbox empty) then | ||
1682 | * read from the PPU_MB register. | ||
1683 | */ | ||
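| /* The dummy read drains any stale entry left in the | ||
| * outbound mailbox, so that MB_Stat again matches the | ||
| * saved (empty) state. | ||
| */ | ||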
1684 | if ((csa->prob.mb_stat_R & 0xFF) == 0) { | ||
1685 | dummy = in_be32(&prob->pu_mb_R); | ||
1686 | eieio(); | ||
1687 | } | ||
1688 | } | ||
1689 | |||
1690 | static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) | ||
1691 | { | ||
1692 | struct spu_priv1 __iomem *priv1 = spu->priv1; | ||
1693 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1694 | u64 dummy = 0UL; | ||
1695 | |||
1696 | /* Restore, Step 67: | ||
1697 | * If CSA.MB_Stat[I]=0 (mailbox empty) then | ||
1698 | * read from the PPUINT_MB register. | ||
1699 | */ | ||
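| /* As in the previous step, the dummy read drains a stale | ||
| * entry; any interrupt latched for that entry is then | ||
| * cleared from INT_Stat_class2. | ||
| */ | ||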
1700 | if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { | ||
1701 | dummy = in_be64(&priv2->puint_mb_R); | ||
1702 | eieio(); | ||
1703 | out_be64(&priv1->int_stat_class2_RW, | ||
1704 | CLASS2_ENABLE_MAILBOX_INTR); | ||
1705 | eieio(); | ||
1706 | } | ||
1707 | } | ||
1708 | |||
1709 | static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu) | ||
1710 | { | ||
1711 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1712 | int i; | ||
1713 | |||
1714 | /* Restore, Step 68: | ||
1715 | * If MFC_SR1[R]='1', restore SLBs from CSA. | ||
1716 | */ | ||
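| /* All eight SLB entries are rewritten, and SLB_Index is | ||
| * then restored to its saved value. | ||
| */ | ||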
1717 | if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) { | ||
1718 | for (i = 0; i < 8; i++) { | ||
1719 | out_be64(&priv2->slb_index_W, i); | ||
1720 | eieio(); | ||
1721 | out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]); | ||
1722 | out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]); | ||
1723 | eieio(); | ||
1724 | } | ||
1725 | out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W); | ||
1726 | eieio(); | ||
1727 | } | ||
1728 | } | ||
1729 | |||
1730 | static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) | ||
1731 | { | ||
1732 | struct spu_priv1 __iomem *priv1 = spu->priv1; | ||
1733 | |||
1734 | /* Restore, Step 69: | ||
1735 | * Restore the MFC_SR1 register from CSA. | ||
1736 | */ | ||
1737 | out_be64(&priv1->mfc_sr1_RW, csa->priv1.mfc_sr1_RW); | ||
1738 | eieio(); | ||
1739 | } | ||
1740 | |||
1741 | static inline void restore_other_spu_access(struct spu_state *csa, | ||
1742 | struct spu *spu) | ||
1743 | { | ||
1744 | /* Restore, Step 70: | ||
1745 | * Restore other SPU mappings to this SPU. TBD. | ||
1746 | */ | ||
1747 | } | ||
1748 | |||
1749 | static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu) | ||
1750 | { | ||
1751 | struct spu_problem __iomem *prob = spu->problem; | ||
1752 | |||
1753 | /* Restore, Step 71: | ||
1754 | * If CSA.SPU_Status[R]=1 then write | ||
1755 | * SPU_RunCntl[R0R1]='01'. | ||
1756 | */ | ||
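| /* If the context was running at save time, restart it; | ||
| * execution resumes at the SPU_NPC value restored in | ||
| * step 64. | ||
| */ | ||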
1757 | if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) { | ||
1758 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); | ||
1759 | eieio(); | ||
1760 | } | ||
1761 | } | ||
1762 | |||
1763 | static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) | ||
1764 | { | ||
1765 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1766 | |||
1767 | /* Restore, Step 72: | ||
1768 | * Restore the MFC_CNTL register from the CSA. | ||
1769 | */ | ||
1770 | out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); | ||
1771 | eieio(); | ||
1772 | } | ||
1773 | |||
1774 | static inline void enable_user_access(struct spu_state *csa, struct spu *spu) | ||
1775 | { | ||
1776 | /* Restore, Step 73: | ||
1777 | * Enable user-space access (if provided) to this | ||
1778 | * SPU by mapping the virtual pages assigned to | ||
1779 | * the SPU memory-mapped I/O (MMIO) for problem | ||
1780 | * state. TBD. | ||
1781 | */ | ||
1782 | } | ||
1783 | |||
1784 | static inline void reset_switch_active(struct spu_state *csa, struct spu *spu) | ||
1785 | { | ||
1786 | /* Restore, Step 74: | ||
1787 | * Reset the "context switch active" flag. | ||
1788 | */ | ||
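| /* clear_bit() is atomic; the mb() that follows orders the | ||
| * flag update ahead of re-enabling interrupts in step 75. | ||
| */ | ||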
1789 | clear_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags); | ||
1790 | mb(); | ||
1791 | } | ||
1792 | |||
1793 | static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) | ||
1794 | { | ||
1795 | struct spu_priv1 __iomem *priv1 = spu->priv1; | ||
1796 | |||
1797 | /* Restore, Step 75: | ||
1798 | * Re-enable SPU interrupts. | ||
1799 | */ | ||
1800 | spin_lock_irq(&spu->register_lock); | ||
1801 | out_be64(&priv1->int_mask_class0_RW, csa->priv1.int_mask_class0_RW); | ||
1802 | out_be64(&priv1->int_mask_class1_RW, csa->priv1.int_mask_class1_RW); | ||
1803 | out_be64(&priv1->int_mask_class2_RW, csa->priv1.int_mask_class2_RW); | ||
1804 | spin_unlock_irq(&spu->register_lock); | ||
1805 | } | ||
1806 | |||
1807 | static int quiesce_spu(struct spu_state *prev, struct spu *spu) | ||
1808 | { | ||
1809 | /* | ||
1810 | * Combine steps 2-18 of the SPU context save sequence, which | ||
1811 | * quiesce the SPU state (disable SPU execution, MFC command | ||
1812 | * queues, decrementer, SPU interrupts, etc.). | ||
1813 | * | ||
1814 | * Returns 0 on success. | ||
1815 | * 2 if failed step 2. | ||
1816 | * 6 if failed step 6. | ||
1817 | */ | ||
1818 | |||
1819 | if (check_spu_isolate(prev, spu)) { /* Step 2. */ | ||
1820 | return 2; | ||
1821 | } | ||
1822 | disable_interrupts(prev, spu); /* Step 3. */ | ||
1823 | set_watchdog_timer(prev, spu); /* Step 4. */ | ||
1824 | inhibit_user_access(prev, spu); /* Step 5. */ | ||
1825 | if (check_spu_isolate(prev, spu)) { /* Step 6. */ | ||
1826 | return 6; | ||
1827 | } | ||
1828 | set_switch_pending(prev, spu); /* Step 7. */ | ||
1829 | save_mfc_cntl(prev, spu); /* Step 8. */ | ||
1830 | save_spu_runcntl(prev, spu); /* Step 9. */ | ||
1831 | save_mfc_sr1(prev, spu); /* Step 10. */ | ||
1832 | save_spu_status(prev, spu); /* Step 11. */ | ||
1833 | save_mfc_decr(prev, spu); /* Step 12. */ | ||
1834 | halt_mfc_decr(prev, spu); /* Step 13. */ | ||
1835 | save_timebase(prev, spu); /* Step 14. */ | ||
1836 | remove_other_spu_access(prev, spu); /* Step 15. */ | ||
1837 | do_mfc_mssync(prev, spu); /* Step 16. */ | ||
1838 | issue_mfc_tlbie(prev, spu); /* Step 17. */ | ||
1839 | handle_pending_interrupts(prev, spu); /* Step 18. */ | ||
1840 | |||
1841 | return 0; | ||
1842 | } | ||
1843 | |||
1844 | static void save_csa(struct spu_state *prev, struct spu *spu) | ||
1845 | { | ||
1846 | /* | ||
1847 | * Combine steps 19-45 of the SPU context save sequence, which | ||
1848 | * save regions of the privileged & problem state areas. | ||
1849 | */ | ||
1850 | |||
1851 | save_mfc_queues(prev, spu); /* Step 19. */ | ||
1852 | save_ppu_querymask(prev, spu); /* Step 20. */ | ||
1853 | save_ppu_querytype(prev, spu); /* Step 21. */ | ||
1854 | save_mfc_csr_tsq(prev, spu); /* Step 22. */ | ||
1855 | save_mfc_csr_cmd(prev, spu); /* Step 23. */ | ||
1856 | save_mfc_csr_ato(prev, spu); /* Step 24. */ | ||
1857 | save_mfc_tclass_id(prev, spu); /* Step 25. */ | ||
1858 | set_mfc_tclass_id(prev, spu); /* Step 26. */ | ||
1859 | purge_mfc_queue(prev, spu); /* Step 27. */ | ||
1860 | wait_purge_complete(prev, spu); /* Step 28. */ | ||
1861 | save_mfc_slbs(prev, spu); /* Step 29. */ | ||
1862 | setup_mfc_sr1(prev, spu); /* Step 30. */ | ||
1863 | save_spu_npc(prev, spu); /* Step 31. */ | ||
1864 | save_spu_privcntl(prev, spu); /* Step 32. */ | ||
1865 | reset_spu_privcntl(prev, spu); /* Step 33. */ | ||
1866 | save_spu_lslr(prev, spu); /* Step 34. */ | ||
1867 | reset_spu_lslr(prev, spu); /* Step 35. */ | ||
1868 | save_spu_cfg(prev, spu); /* Step 36. */ | ||
1869 | save_pm_trace(prev, spu); /* Step 37. */ | ||
1870 | save_mfc_rag(prev, spu); /* Step 38. */ | ||
1871 | save_ppu_mb_stat(prev, spu); /* Step 39. */ | ||
1872 | save_ppu_mb(prev, spu); /* Step 40. */ | ||
1873 | save_ppuint_mb(prev, spu); /* Step 41. */ | ||
1874 | save_ch_part1(prev, spu); /* Step 42. */ | ||
1875 | save_spu_mb(prev, spu); /* Step 43. */ | ||
1876 | save_mfc_cmd(prev, spu); /* Step 44. */ | ||
1877 | reset_ch(prev, spu); /* Step 45. */ | ||
1878 | } | ||
1879 | |||
1880 | static void save_lscsa(struct spu_state *prev, struct spu *spu) | ||
1881 | { | ||
1882 | /* | ||
1883 | * Perform steps 46-57 of SPU context save sequence, | ||
1884 | * which save regions of the local store and register | ||
1885 | * file. | ||
1886 | */ | ||
1887 | |||
1888 | resume_mfc_queue(prev, spu); /* Step 46. */ | ||
1889 | setup_mfc_slbs(prev, spu); /* Step 47. */ | ||
1890 | set_switch_active(prev, spu); /* Step 48. */ | ||
1891 | enable_interrupts(prev, spu); /* Step 49. */ | ||
1892 | save_ls_16kb(prev, spu); /* Step 50. */ | ||
1893 | set_spu_npc(prev, spu); /* Step 51. */ | ||
1894 | set_signot1(prev, spu); /* Step 52. */ | ||
1895 | set_signot2(prev, spu); /* Step 53. */ | ||
1896 | send_save_code(prev, spu); /* Step 54. */ | ||
1897 | set_ppu_querymask(prev, spu); /* Step 55. */ | ||
1898 | wait_tag_complete(prev, spu); /* Step 56. */ | ||
1899 | wait_spu_stopped(prev, spu); /* Step 57. */ | ||
1900 | } | ||
1901 | |||
1902 | static void harvest(struct spu_state *prev, struct spu *spu) | ||
1903 | { | ||
1904 | /* | ||
1905 | * Perform steps 2-25 of SPU context restore sequence, | ||
1906 | * which reset an SPU either after a failed save, or | ||
1907 | * when using an SPU for the first time. | ||
1908 | */ | ||
1909 | |||
1910 | disable_interrupts(prev, spu); /* Step 2. */ | ||
1911 | inhibit_user_access(prev, spu); /* Step 3. */ | ||
1912 | terminate_spu_app(prev, spu); /* Step 4. */ | ||
1913 | set_switch_pending(prev, spu); /* Step 5. */ | ||
1914 | remove_other_spu_access(prev, spu); /* Step 6. */ | ||
1915 | suspend_mfc(prev, spu); /* Step 7. */ | ||
1916 | wait_suspend_mfc_complete(prev, spu); /* Step 8. */ | ||
1917 | if (!suspend_spe(prev, spu)) /* Step 9. */ | ||
1918 | clear_spu_status(prev, spu); /* Step 10. */ | ||
1919 | do_mfc_mssync(prev, spu); /* Step 11. */ | ||
1920 | issue_mfc_tlbie(prev, spu); /* Step 12. */ | ||
1921 | handle_pending_interrupts(prev, spu); /* Step 13. */ | ||
1922 | purge_mfc_queue(prev, spu); /* Step 14. */ | ||
1923 | wait_purge_complete(prev, spu); /* Step 15. */ | ||
1924 | reset_spu_privcntl(prev, spu); /* Step 16. */ | ||
1925 | reset_spu_lslr(prev, spu); /* Step 17. */ | ||
1926 | setup_mfc_sr1(prev, spu); /* Step 18. */ | ||
1927 | invalidate_slbs(prev, spu); /* Step 19. */ | ||
1928 | reset_ch_part1(prev, spu); /* Step 20. */ | ||
1929 | reset_ch_part2(prev, spu); /* Step 21. */ | ||
1930 | enable_interrupts(prev, spu); /* Step 22. */ | ||
1931 | set_switch_active(prev, spu); /* Step 23. */ | ||
1932 | set_mfc_tclass_id(prev, spu); /* Step 24. */ | ||
1933 | resume_mfc_queue(prev, spu); /* Step 25. */ | ||
1934 | } | ||
1935 | |||
1936 | static void restore_lscsa(struct spu_state *next, struct spu *spu) | ||
1937 | { | ||
1938 | /* | ||
1939 | * Perform steps 26-40 of SPU context restore sequence, | ||
1940 | * which restore regions of the local store and register | ||
1941 | * file. | ||
1942 | */ | ||
1943 | |||
1944 | set_watchdog_timer(next, spu); /* Step 26. */ | ||
1945 | setup_spu_status_part1(next, spu); /* Step 27. */ | ||
1946 | setup_spu_status_part2(next, spu); /* Step 28. */ | ||
1947 | restore_mfc_rag(next, spu); /* Step 29. */ | ||
1948 | setup_mfc_slbs(next, spu); /* Step 30. */ | ||
1949 | set_spu_npc(next, spu); /* Step 31. */ | ||
1950 | set_signot1(next, spu); /* Step 32. */ | ||
1951 | set_signot2(next, spu); /* Step 33. */ | ||
1952 | setup_decr(next, spu); /* Step 34. */ | ||
1953 | setup_ppu_mb(next, spu); /* Step 35. */ | ||
1954 | setup_ppuint_mb(next, spu); /* Step 36. */ | ||
1955 | send_restore_code(next, spu); /* Step 37. */ | ||
1956 | set_ppu_querymask(next, spu); /* Step 38. */ | ||
1957 | wait_tag_complete(next, spu); /* Step 39. */ | ||
1958 | wait_spu_stopped(next, spu); /* Step 40. */ | ||
1959 | } | ||
1960 | |||
1961 | static void restore_csa(struct spu_state *next, struct spu *spu) | ||
1962 | { | ||
1963 | /* | ||
1964 | * Combine steps 41-75 of the SPU context restore sequence, which | ||
1965 | * restore regions of the privileged & problem state areas. | ||
1966 | */ | ||
1967 | |||
1968 | restore_spu_privcntl(next, spu); /* Step 41. */ | ||
1969 | restore_status_part1(next, spu); /* Step 42. */ | ||
1970 | restore_status_part2(next, spu); /* Step 43. */ | ||
1971 | restore_ls_16kb(next, spu); /* Step 44. */ | ||
1972 | wait_tag_complete(next, spu); /* Step 45. */ | ||
1973 | suspend_mfc(next, spu); /* Step 46. */ | ||
1974 | wait_suspend_mfc_complete(next, spu); /* Step 47. */ | ||
1975 | issue_mfc_tlbie(next, spu); /* Step 48. */ | ||
1976 | clear_interrupts(next, spu); /* Step 49. */ | ||
1977 | restore_mfc_queues(next, spu); /* Step 50. */ | ||
1978 | restore_ppu_querymask(next, spu); /* Step 51. */ | ||
1979 | restore_ppu_querytype(next, spu); /* Step 52. */ | ||
1980 | restore_mfc_csr_tsq(next, spu); /* Step 53. */ | ||
1981 | restore_mfc_csr_cmd(next, spu); /* Step 54. */ | ||
1982 | restore_mfc_csr_ato(next, spu); /* Step 55. */ | ||
1983 | restore_mfc_tclass_id(next, spu); /* Step 56. */ | ||
1984 | set_llr_event(next, spu); /* Step 57. */ | ||
1985 | restore_decr_wrapped(next, spu); /* Step 58. */ | ||
1986 | restore_ch_part1(next, spu); /* Step 59. */ | ||
1987 | restore_ch_part2(next, spu); /* Step 60. */ | ||
1988 | restore_spu_lslr(next, spu); /* Step 61. */ | ||
1989 | restore_spu_cfg(next, spu); /* Step 62. */ | ||
1990 | restore_pm_trace(next, spu); /* Step 63. */ | ||
1991 | restore_spu_npc(next, spu); /* Step 64. */ | ||
1992 | restore_spu_mb(next, spu); /* Step 65. */ | ||
1993 | check_ppu_mb_stat(next, spu); /* Step 66. */ | ||
1994 | check_ppuint_mb_stat(next, spu); /* Step 67. */ | ||
1995 | restore_mfc_slbs(next, spu); /* Step 68. */ | ||
1996 | restore_mfc_sr1(next, spu); /* Step 69. */ | ||
1997 | restore_other_spu_access(next, spu); /* Step 70. */ | ||
1998 | restore_spu_runcntl(next, spu); /* Step 71. */ | ||
1999 | restore_mfc_cntl(next, spu); /* Step 72. */ | ||
2000 | enable_user_access(next, spu); /* Step 73. */ | ||
2001 | reset_switch_active(next, spu); /* Step 74. */ | ||
2002 | reenable_interrupts(next, spu); /* Step 75. */ | ||
2003 | } | ||
2004 | |||
2005 | static int __do_spu_save(struct spu_state *prev, struct spu *spu) | ||
2006 | { | ||
2007 | int rc; | ||
2008 | |||
2009 | /* | ||
2010 | * SPU context save can be broken into three phases: | ||
2011 | * | ||
2012 | * (a) quiesce [steps 2-18]. | ||
2013 | * (b) save of CSA, performed by PPE [steps 19-45]. | ||
2014 | * (c) save of LSCSA, mostly performed by SPU [steps 46-57]. | ||
2015 | * | ||
2016 | * Returns 0 on success. | ||
2017 | * 2, 6 if failed to quiesce SPU. | ||
2018 | * 53 if SPU-side of save failed. | ||
2019 | */ | ||
2020 | |||
2021 | rc = quiesce_spu(prev, spu); /* Steps 2-18. */ | ||
2022 | switch (rc) { | ||
2023 | default: | ||
2024 | case 2: | ||
2025 | case 6: | ||
2026 | harvest(prev, spu); | ||
2027 | return rc; | ||
2029 | case 0: | ||
2030 | break; | ||
2031 | } | ||
2032 | save_csa(prev, spu); /* Steps 19-45. */ | ||
2033 | save_lscsa(prev, spu); /* Steps 46-57. */ | ||
2034 | return check_save_status(prev, spu); /* Verify save completed. */ | ||
2035 | } | ||
2036 | |||
2037 | static int __do_spu_restore(struct spu_state *next, struct spu *spu) | ||
2038 | { | ||
2039 | int rc; | ||
2040 | |||
2041 | /* | ||
2042 | * SPU context restore can be broken into three phases: | ||
2043 | * | ||
2044 | * (a) harvest (or reset) SPU [steps 2-25]. | ||
2045 | * (b) restore LSCSA [steps 26-40], mostly performed by SPU. | ||
2046 | * (c) restore CSA [steps 41-75], performed by PPE. | ||
2047 | * | ||
2048 | * The 'harvest' step is not performed here, but rather | ||
2049 | * by the callers, as needed. | ||
2050 | */ | ||
2051 | |||
2052 | restore_lscsa(next, spu); /* Steps 26-40. */ | ||
2053 | rc = check_restore_status(next, spu); /* Verify restore completed. */ | ||
2054 | switch (rc) { | ||
2055 | default: | ||
2056 | /* Failed. Return now. */ | ||
2057 | return rc; | ||
2059 | case 0: | ||
2060 | /* Fall through to next step. */ | ||
2061 | break; | ||
2062 | } | ||
2063 | restore_csa(next, spu); /* Steps 41-75. */ | ||
2064 | |||
2065 | return 0; | ||
2066 | } | ||
2067 | |||
55 | /** | 2068 | /** |
56 | * spu_save - SPU context save, with locking. | 2069 | * spu_save - SPU context save, with locking. |
57 | * @prev: pointer to SPU context save area, to be saved. | 2070 | * @prev: pointer to SPU context save area, to be saved. |
@@ -61,9 +2074,13 @@ | |||
61 | */ | 2074 | */ |
62 | int spu_save(struct spu_state *prev, struct spu *spu) | 2075 | int spu_save(struct spu_state *prev, struct spu *spu) |
63 | { | 2076 | { |
64 | /* XXX missing */ | 2077 | int rc; |
65 | 2078 | ||
66 | return 0; | 2079 | acquire_spu_lock(spu); /* Step 1. */ |
2080 | rc = __do_spu_save(prev, spu); /* Steps 2-57. */ | ||
2081 | release_spu_lock(spu); | ||
2082 | |||
2083 | return rc; | ||
67 | } | 2084 | } |
68 | 2085 | ||
69 | /** | 2086 | /** |
@@ -77,9 +2094,14 @@ int spu_save(struct spu_state *prev, struct spu *spu) | |||
77 | */ | 2094 | */ |
78 | int spu_restore(struct spu_state *new, struct spu *spu) | 2095 | int spu_restore(struct spu_state *new, struct spu *spu) |
79 | { | 2096 | { |
80 | /* XXX missing */ | 2097 | int rc; |
81 | 2098 | ||
82 | return 0; | 2099 | acquire_spu_lock(spu); |
2100 | harvest(NULL, spu); /* Restore, Steps 2-25. */ | ||
2101 | rc = __do_spu_restore(new, spu); /* Restore, Steps 26-75. */ | ||
2102 | release_spu_lock(spu); | ||
2103 | |||
2104 | return rc; | ||
83 | } | 2105 | } |
84 | 2106 | ||
85 | /** | 2107 | /** |
@@ -93,9 +2115,17 @@ int spu_restore(struct spu_state *new, struct spu *spu) | |||
93 | */ | 2115 | */ |
94 | int spu_switch(struct spu_state *prev, struct spu_state *new, struct spu *spu) | 2116 | int spu_switch(struct spu_state *prev, struct spu_state *new, struct spu *spu) |
95 | { | 2117 | { |
96 | /* XXX missing */ | 2118 | int rc; |
97 | 2119 | ||
98 | return 0; | 2120 | acquire_spu_lock(spu); /* Save, Step 1. */ |
2121 | rc = __do_spu_save(prev, spu); /* Save, Steps 2-57. */ | ||
2122 | if (rc != 0) { | ||
2123 | harvest(prev, spu); | ||
2124 | } | ||
2125 | rc = __do_spu_restore(new, spu); /* Restore, Steps 26-75. */ | ||
2126 | release_spu_lock(spu); | ||
2127 | |||
2128 | return rc; | ||
99 | } | 2129 | } |
100 | 2130 | ||
101 | static void init_prob(struct spu_state *csa) | 2131 | static void init_prob(struct spu_state *csa) |
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h index d1d537de4f5c..989a0688144e 100644 --- a/include/asm-powerpc/spu_csa.h +++ b/include/asm-powerpc/spu_csa.h | |||
@@ -200,7 +200,6 @@ struct spu_priv2_collapsed { | |||
200 | u64 spu_chnlcnt_RW; | 200 | u64 spu_chnlcnt_RW; |
201 | u64 spu_chnldata_RW; | 201 | u64 spu_chnldata_RW; |
202 | u64 spu_cfg_RW; | 202 | u64 spu_cfg_RW; |
203 | u64 spu_pm_trace_tag_status_RW; | ||
204 | u64 spu_tag_status_query_RW; | 203 | u64 spu_tag_status_query_RW; |
205 | u64 spu_cmd_buf1_RW; | 204 | u64 spu_cmd_buf1_RW; |
206 | u64 spu_cmd_buf2_RW; | 205 | u64 spu_cmd_buf2_RW; |