 arch/powerpc/platforms/cell/spu_base.c          | 103 -
 arch/powerpc/platforms/cell/spufs/Makefile      |   2 +-
 arch/powerpc/platforms/cell/spufs/backing_ops.c |   6 +
 arch/powerpc/platforms/cell/spufs/fault.c       | 193 +
 arch/powerpc/platforms/cell/spufs/hw_ops.c      |   9 +
 arch/powerpc/platforms/cell/spufs/run.c         |  28 +-
 arch/powerpc/platforms/cell/spufs/spufs.h       |   4 +
 arch/powerpc/platforms/cell/spufs/switch.c      |   8 +-
 include/asm-powerpc/mmu.h                       |   1 +
 include/asm-powerpc/spu_csa.h                   |   1 +
 10 files changed, 225 insertions(+), 130 deletions(-)
 create mode 100644 arch/powerpc/platforms/cell/spufs/fault.c
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 6242f3c19f68..31fa55f33415 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -290,7 +290,6 @@ spu_irq_class_1(int irq, void *data)
 
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
-EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
 
 static irqreturn_t
 spu_irq_class_2(int irq, void *data)
@@ -462,108 +461,6 @@ void spu_free(struct spu *spu)
 }
 EXPORT_SYMBOL_GPL(spu_free);
 
-static int spu_handle_mm_fault(struct spu *spu)
-{
-	struct mm_struct *mm = spu->mm;
-	struct vm_area_struct *vma;
-	u64 ea, dsisr, is_write;
-	int ret;
-
-	ea = spu->dar;
-	dsisr = spu->dsisr;
-#if 0
-	if (!IS_VALID_EA(ea)) {
-		return -EFAULT;
-	}
-#endif /* XXX */
-	if (mm == NULL) {
-		return -EFAULT;
-	}
-	if (mm->pgd == NULL) {
-		return -EFAULT;
-	}
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, ea);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= ea)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-#if 0
-	if (expand_stack(vma, ea))
-		goto bad_area;
-#endif /* XXX */
-good_area:
-	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
-	} else {
-		if (dsisr & MFC_DSISR_ACCESS_DENIED)
-			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-			goto bad_area;
-	}
-	ret = 0;
-	switch (handle_mm_fault(mm, vma, ea, is_write)) {
-	case VM_FAULT_MINOR:
-		current->min_flt++;
-		break;
-	case VM_FAULT_MAJOR:
-		current->maj_flt++;
-		break;
-	case VM_FAULT_SIGBUS:
-		ret = -EFAULT;
-		goto bad_area;
-	case VM_FAULT_OOM:
-		ret = -ENOMEM;
-		goto bad_area;
-	default:
-		BUG();
-	}
-	up_read(&mm->mmap_sem);
-	return ret;
-
-bad_area:
-	up_read(&mm->mmap_sem);
-	return -EFAULT;
-}
-
-int spu_irq_class_1_bottom(struct spu *spu)
-{
-	u64 ea, dsisr, access, error = 0UL;
-	int ret = 0;
-
-	ea = spu->dar;
-	dsisr = spu->dsisr;
-	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
-		u64 flags;
-
-		access = (_PAGE_PRESENT | _PAGE_USER);
-		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
-		local_irq_save(flags);
-		if (hash_page(ea, access, 0x300) != 0)
-			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-		local_irq_restore(flags);
-	}
-	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
-		if ((ret = spu_handle_mm_fault(spu)) != 0)
-			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-		else
-			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
-	}
-	spu->dar = 0UL;
-	spu->dsisr = 0UL;
-	if (!error) {
-		spu_restart_dma(spu);
-	} else {
-		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
-	}
-	return ret;
-}
-
 struct sysdev_class spu_sysdev_class = {
 	set_kset_name("spu")
 };
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index 472217d19faf..2cd89c11af5a 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,4 +1,4 @@
-obj-y += switch.o
+obj-y += switch.o fault.o
 
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o syscalls.o coredump.o
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 1898f0d3a8b8..3322528fa6eb 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -350,6 +350,11 @@ static int spu_backing_send_mfc_command(struct spu_context *ctx,
 	return ret;
 }
 
+static void spu_backing_restart_dma(struct spu_context *ctx)
+{
+	/* nothing to do here */
+}
+
 struct spu_context_ops spu_backing_ops = {
 	.mbox_read = spu_backing_mbox_read,
 	.mbox_stat_read = spu_backing_mbox_stat_read,
@@ -376,4 +381,5 @@ struct spu_context_ops spu_backing_ops = {
 	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
 	.send_mfc_command = spu_backing_send_mfc_command,
+	.restart_dma = spu_backing_restart_dma,
 };
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
new file mode 100644
index 000000000000..182dc914cbc9
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -0,0 +1,193 @@
+/*
+ * Low-level SPU handling
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+
+#include "spufs.h"
+
+/*
+ * This ought to be kept in sync with the powerpc specific do_page_fault
+ * function. Currently, there are a few corner cases that, fortunately,
+ * we haven't had to handle.
+ */
+static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr)
+{
+	struct vm_area_struct *vma;
+	unsigned long is_write;
+	int ret;
+
+#if 0
+	if (!IS_VALID_EA(ea)) {
+		return -EFAULT;
+	}
+#endif /* XXX */
+	if (mm == NULL) {
+		return -EFAULT;
+	}
+	if (mm->pgd == NULL) {
+		return -EFAULT;
+	}
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ea);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= ea)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, ea))
+		goto bad_area;
+good_area:
+	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		if (dsisr & MFC_DSISR_ACCESS_DENIED)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	ret = 0;
+	switch (handle_mm_fault(mm, vma, ea, is_write)) {
+	case VM_FAULT_MINOR:
+		current->min_flt++;
+		break;
+	case VM_FAULT_MAJOR:
+		current->maj_flt++;
+		break;
+	case VM_FAULT_SIGBUS:
+		ret = -EFAULT;
+		goto bad_area;
+	case VM_FAULT_OOM:
+		ret = -ENOMEM;
+		goto bad_area;
+	default:
+		BUG();
+	}
+	up_read(&mm->mmap_sem);
+	return ret;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+	return -EFAULT;
+}
+
+static void spufs_handle_dma_error(struct spu_context *ctx, int type)
+{
+	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+		ctx->event_return |= type;
+		wake_up_all(&ctx->stop_wq);
+	} else {
+		switch (type) {
+		case SPE_EVENT_DMA_ALIGNMENT:
+		case SPE_EVENT_SPE_DATA_STORAGE:
+		case SPE_EVENT_INVALID_DMA:
+			force_sig(SIGBUS, /* info, */ current);
+			break;
+		case SPE_EVENT_SPE_ERROR:
+			force_sig(SIGILL, /* info */ current);
+			break;
+		}
+	}
+}
+
+void spufs_dma_callback(struct spu *spu, int type)
+{
+	spufs_handle_dma_error(spu->ctx, type);
+}
+EXPORT_SYMBOL_GPL(spufs_dma_callback);
+
+/*
+ * Bottom half handler for page faults. We can't do this from
+ * interrupt context, since we might need to sleep.
+ * We also need to give up the mutex so we can get scheduled
+ * out while waiting for the backing store.
+ *
+ * TODO: try calling hash_page from the interrupt handler first
+ * in order to speed up the easy case.
+ */
+int spufs_handle_class1(struct spu_context *ctx)
+{
+	u64 ea, dsisr, access;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * dar and dsisr get passed from the registers
+	 * to the spu_context, to this function, but not
+	 * back to the spu if it gets scheduled again.
+	 *
+	 * If we don't handle the fault for a saved context
+	 * in time, we can still expect to get the same fault
+	 * immediately after the context restore.
+	 */
+	if (ctx->state == SPU_STATE_RUNNABLE) {
+		ea = ctx->spu->dar;
+		dsisr = ctx->spu->dsisr;
+		ctx->spu->dar = ctx->spu->dsisr = 0;
+	} else {
+		ea = ctx->csa.priv1.mfc_dar_RW;
+		dsisr = ctx->csa.priv1.mfc_dsisr_RW;
+		ctx->csa.priv1.mfc_dar_RW = 0;
+		ctx->csa.priv1.mfc_dsisr_RW = 0;
+	}
+
+	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
+		return 0;
+
+	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
+		dsisr, ctx->state);
+
+	/* we must not hold the lock when entering spu_handle_mm_fault */
+	spu_release(ctx);
+
+	access = (_PAGE_PRESENT | _PAGE_USER);
+	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+	local_irq_save(flags);
+	ret = hash_page(ea, access, 0x300);
+	local_irq_restore(flags);
+
+	/* hashing failed, so try the actual fault handler */
+	if (ret)
+		ret = spu_handle_mm_fault(current->mm, ea, dsisr);
+
+	spu_acquire(ctx);
+	/*
+	 * If we handled the fault successfully and are in runnable
+	 * state, restart the DMA.
+	 * In case of an unhandled error, report the problem to user space.
+	 */
+	if (!ret) {
+		if (ctx->spu)
+			ctx->ops->restart_dma(ctx);
+	} else
+		spufs_handle_dma_error(ctx, SPE_EVENT_SPE_DATA_STORAGE);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spufs_handle_class1);
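
[Annotation] The locking comment in spufs_handle_class1() above is the heart of this patch: drop the context mutex across the blocking fault path, then re-validate the context state once the mutex is reacquired. The following is a minimal userspace sketch of that pattern using pthreads; fault_ctx and resolve_fault are entirely hypothetical stand-ins for the spufs context and the backing-store fault path, not kernel APIs.

/* Hypothetical userspace sketch of the "drop the lock around a
 * blocking fault handler" pattern; not kernel code. */
#include <pthread.h>

struct fault_ctx {
	pthread_mutex_t lock;	/* stands in for the spufs context mutex */
	int runnable;		/* stands in for ctx->state / ctx->spu */
};

/* may sleep, e.g. while paging in from the backing store */
static int resolve_fault(unsigned long ea) { (void)ea; return 0; }

static int handle_fault(struct fault_ctx *ctx, unsigned long ea)
{
	int ret;

	/* caller holds ctx->lock; give it up so we can be scheduled
	 * out while resolve_fault() sleeps */
	pthread_mutex_unlock(&ctx->lock);
	ret = resolve_fault(ea);
	pthread_mutex_lock(&ctx->lock);

	/* the context may have changed while unlocked, so re-check
	 * before touching anything that depends on it */
	if (ret == 0 && ctx->runnable)
		;	/* restart the DMA here */
	return ret;
}

int main(void)
{
	struct fault_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 1 };

	pthread_mutex_lock(&ctx.lock);	/* enter as the real caller does */
	int ret = handle_fault(&ctx, 0x1000);
	pthread_mutex_unlock(&ctx.lock);
	return ret;
}

spufs_handle_class1() follows the same shape: spu_release(), then hash_page()/spu_handle_mm_fault(), then spu_acquire(), and finally the ctx->spu check before restart_dma().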
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index ae42e03b8c86..428875c5e4ec 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -296,6 +296,14 @@ static int spu_hw_send_mfc_command(struct spu_context *ctx,
 	}
 }
 
+static void spu_hw_restart_dma(struct spu_context *ctx)
+{
+	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;
+
+	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
+		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+}
+
 struct spu_context_ops spu_hw_ops = {
 	.mbox_read = spu_hw_mbox_read,
 	.mbox_stat_read = spu_hw_mbox_stat_read,
@@ -320,4 +328,5 @@ struct spu_context_ops spu_hw_ops = {
 	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
 	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
 	.send_mfc_command = spu_hw_send_mfc_command,
+	.restart_dma = spu_hw_restart_dma,
 };
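
[Annotation] With this hunk both backends implement restart_dma, so spufs_handle_class1() can restart the MFC through ctx->ops without knowing whether the context is live on hardware or saved in memory. Below is a self-contained sketch of that ops-table dispatch idiom; the types are simplified stand-ins, not the kernel's definitions.

/* Sketch of the spu_context_ops dispatch: the caller invokes
 * ctx->ops->restart_dma() and the backend decides what that means.
 * Hypothetical, simplified types -- not the kernel's. */
#include <stdio.h>

struct spu_context;

struct spu_context_ops {
	void (*restart_dma)(struct spu_context *ctx);
};

struct spu_context {
	const struct spu_context_ops *ops;
};

static void hw_restart_dma(struct spu_context *ctx)
{
	(void)ctx;
	printf("poke the MFC control register\n");
}

static void backing_restart_dma(struct spu_context *ctx)
{
	(void)ctx;	/* nothing to do: the restart happens on restore */
}

static const struct spu_context_ops hw_ops = { .restart_dma = hw_restart_dma };
static const struct spu_context_ops backing_ops = { .restart_dma = backing_restart_dma };

int main(void)
{
	struct spu_context live = { .ops = &hw_ops };
	struct spu_context saved = { .ops = &backing_ops };

	live.ops->restart_dma(&live);	/* context running on an SPU */
	saved.ops->restart_dma(&saved);	/* context saved in memory */
	return 0;
}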
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 7df5202c9a90..1a8195bf75d5 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -18,27 +18,6 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
-void spufs_dma_callback(struct spu *spu, int type)
-{
-	struct spu_context *ctx = spu->ctx;
-
-	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
-		ctx->event_return |= type;
-		wake_up_all(&ctx->stop_wq);
-	} else {
-		switch (type) {
-		case SPE_EVENT_DMA_ALIGNMENT:
-		case SPE_EVENT_SPE_DATA_STORAGE:
-		case SPE_EVENT_INVALID_DMA:
-			force_sig(SIGBUS, /* info, */ current);
-			break;
-		case SPE_EVENT_SPE_ERROR:
-			force_sig(SIGILL, /* info */ current);
-			break;
-		}
-	}
-}
-
 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 {
 	struct spu *spu;
@@ -294,11 +273,8 @@ int spu_process_callback(struct spu_context *ctx)
 static inline int spu_process_events(struct spu_context *ctx)
 {
 	struct spu *spu = ctx->spu;
-	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
 	int ret = 0;
 
-	if (spu->dsisr & pte_fault)
-		ret = spu_irq_class_1_bottom(spu);
 	if (spu->class_0_pending)
 		ret = spu_irq_class_0_bottom(spu);
 	if (!ret && signal_pending(current))
@@ -332,6 +308,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 				break;
 			status &= ~SPU_STATUS_STOPPED_BY_STOP;
 		}
+		ret = spufs_handle_class1(ctx);
+		if (ret)
+			break;
+
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
 			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index cae2ad435b0a..9993c9b3cffc 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -141,6 +141,7 @@ struct spu_context_ops {
 			struct spu_dma_info * info);
 	void (*proxydma_info_read) (struct spu_context * ctx,
 			struct spu_proxydma_info * info);
+	void (*restart_dma)(struct spu_context *ctx);
 };
 
 extern struct spu_context_ops spu_hw_ops;
@@ -172,6 +173,9 @@ int put_spu_gang(struct spu_gang *gang);
 void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 
+/* fault handling */
+int spufs_handle_class1(struct spu_context *ctx);
+
 /* context management */
 static inline void spu_acquire(struct spu_context *ctx)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index fd91c73de34e..8347c4a3f894 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2084,6 +2084,10 @@ int spu_save(struct spu_state *prev, struct spu *spu)
 	int rc;
 
 	acquire_spu_lock(spu);	        /* Step 1.     */
+	prev->dar = spu->dar;
+	prev->dsisr = spu->dsisr;
+	spu->dar = 0;
+	spu->dsisr = 0;
 	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
 	release_spu_lock(spu);
 	if (rc != 0 && rc != 2 && rc != 6) {
@@ -2109,9 +2113,9 @@ int spu_restore(struct spu_state *new, struct spu *spu)
 
 	acquire_spu_lock(spu);
 	harvest(NULL, spu);
-	spu->dar = 0;
-	spu->dsisr = 0;
 	spu->slb_replace = 0;
+	new->dar = 0;
+	new->dsisr = 0;
 	spu->class_0_pending = 0;
 	rc = __do_spu_restore(new, spu);
 	release_spu_lock(spu);
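
[Annotation] Together with the new dar/dsisr fields added to spu_csa.h further below, these hunks make the pending fault state part of the saved context image: spu_save() moves dar/dsisr from the physical SPU into the spu_state, and spu_restore() starts from a clean slate, since an unresolved fault will simply be raised again after the restore. A compressed sketch of that data movement, using simplified stand-in types rather than the real switch code:

/* Simplified stand-ins for struct spu and struct spu_state; this only
 * illustrates the save/restore data movement, not the real code. */
#include <stdint.h>

struct spu { uint64_t dar, dsisr; };
struct spu_state { uint64_t dar, dsisr; };

static void save_fault_state(struct spu_state *prev, struct spu *spu)
{
	prev->dar = spu->dar;		/* the fault travels with the context */
	prev->dsisr = spu->dsisr;
	spu->dar = 0;			/* the physical SPU starts clean */
	spu->dsisr = 0;
}

static void restore_fault_state(struct spu_state *new, struct spu *spu)
{
	(void)spu;
	new->dar = 0;			/* an unhandled fault isn't replayed: */
	new->dsisr = 0;			/* it will recur and be handled then  */
}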
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 200055a4b82b..e22fd8811505 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -234,6 +234,7 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned long vsid, pte_t *ptep, unsigned long trap,
 			   unsigned int local);
 struct mm_struct;
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
 extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
 			  unsigned long ea, unsigned long vsid, int local,
 			  unsigned long trap);
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 8aad0619eb8e..02e56a6685a2 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -242,6 +242,7 @@ struct spu_state {
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
+	u64 dar, dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;
 };