Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/fault.c')
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/fault.c | 211 |
1 file changed, 211 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
new file mode 100644
index 0000000000..0f75c07e29
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -0,0 +1,211 @@
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Fortunately, there are a few corner cases that we have not
 * had to handle so far.
 */
static int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
                unsigned long dsisr)
{
        struct vm_area_struct *vma;
        unsigned long is_write;
        int ret;

#if 0
        if (!IS_VALID_EA(ea)) {
                return -EFAULT;
        }
#endif /* XXX */
        if (mm == NULL) {
                return -EFAULT;
        }
        if (mm->pgd == NULL) {
                return -EFAULT;
        }

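        /*
         * Find the vma that covers the faulting effective address,
         * expanding the stack downwards if necessary.
         */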
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, ea);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= ea)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, ea))
                goto bad_area;
good_area:
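        /*
         * An MFC "put" stores to the effective address, so treat it as
         * a write for the permission checks below.
         */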
        is_write = dsisr & MFC_DSISR_ACCESS_PUT;
        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (dsisr & MFC_DSISR_ACCESS_DENIED)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        ret = 0;
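        /* Fault the page in and account it against the current task. */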
        switch (handle_mm_fault(mm, vma, ea, is_write)) {
        case VM_FAULT_MINOR:
                current->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                current->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                ret = -EFAULT;
                goto bad_area;
        case VM_FAULT_OOM:
                ret = -ENOMEM;
                goto bad_area;
        default:
                BUG();
        }
        up_read(&mm->mmap_sem);
        return ret;

bad_area:
        up_read(&mm->mmap_sem);
        return -EFAULT;
}

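/*
 * Report a failed DMA either through the SPE event mechanism, if the
 * context was created with SPU_CREATE_EVENTS_ENABLED, or by delivering
 * the corresponding signal to the current task.
 */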
static void spufs_handle_dma_error(struct spu_context *ctx,
                unsigned long ea, int type)
{
        if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
                ctx->event_return |= type;
                wake_up_all(&ctx->stop_wq);
        } else {
                siginfo_t info;
                memset(&info, 0, sizeof(info));

                switch (type) {
                case SPE_EVENT_INVALID_DMA:
                        info.si_signo = SIGBUS;
                        info.si_code = BUS_OBJERR;
                        break;
                case SPE_EVENT_SPE_DATA_STORAGE:
                        info.si_signo = SIGBUS;
                        info.si_addr = (void __user *)ea;
                        info.si_code = BUS_ADRERR;
                        break;
                case SPE_EVENT_DMA_ALIGNMENT:
                        info.si_signo = SIGBUS;
                        /* DAR isn't set for an alignment fault :( */
                        info.si_code = BUS_ADRALN;
                        break;
                case SPE_EVENT_SPE_ERROR:
                        info.si_signo = SIGILL;
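                        /*
                         * The NPC points at the next instruction; SPU
                         * instructions are four bytes, so NPC - 4 is the
                         * address of the faulting instruction.
                         */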
                        info.si_addr = (void __user *)(unsigned long)
                                ctx->ops->npc_read(ctx) - 4;
                        info.si_code = ILL_ILLOPC;
                        break;
                }
                if (info.si_signo)
                        force_sig_info(info.si_signo, &info, current);
        }
}

void spufs_dma_callback(struct spu *spu, int type)
{
        spufs_handle_dma_error(spu->ctx, spu->dar, type);
}
EXPORT_SYMBOL_GPL(spufs_dma_callback);

/*
 * Bottom half handler for page faults. We cannot do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so that we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 * in order to speed up the easy case.
 */
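/*
 * Class 1 interrupts are the MFC translation faults, raised when a DMA
 * transfer touches an effective address that is not currently mapped in
 * the hash table.
 */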
int spufs_handle_class1(struct spu_context *ctx)
{
        u64 ea, dsisr, access;
        unsigned long flags;
        int ret;

        /*
         * dar and dsisr get passed from the registers
         * to the spu_context, to this function, but not
         * back to the spu if it gets scheduled again.
         *
         * If we don't handle the fault for a saved context
         * in time, we can still expect to get the same fault
         * again immediately after the context restore.
         */
        if (ctx->state == SPU_STATE_RUNNABLE) {
                ea = ctx->spu->dar;
                dsisr = ctx->spu->dsisr;
                ctx->spu->dar = ctx->spu->dsisr = 0;
        } else {
                ea = ctx->csa.priv1.mfc_dar_RW;
                dsisr = ctx->csa.priv1.mfc_dsisr_RW;
                ctx->csa.priv1.mfc_dar_RW = 0;
                ctx->csa.priv1.mfc_dsisr_RW = 0;
        }

        if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
                return 0;

        pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
                dsisr, ctx->state);

        /* we must not hold the lock when entering spu_handle_mm_fault */
        spu_release(ctx);

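        /*
         * First try to refill the hash table directly; 0x300 is the
         * powerpc data storage interrupt vector. hash_page() is called
         * with interrupts disabled, as on the low-level exception path.
         */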
        access = (_PAGE_PRESENT | _PAGE_USER);
        access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
        local_irq_save(flags);
        ret = hash_page(ea, access, 0x300);
        local_irq_restore(flags);

        /* hashing failed, so try the actual fault handler */
        if (ret)
                ret = spu_handle_mm_fault(current->mm, ea, dsisr);

        spu_acquire(ctx);
        /*
         * If we handled the fault successfully and are in runnable
         * state, restart the DMA.
         * In case of an unhandled error, report the problem to user space.
         */
        if (!ret) {
                if (ctx->spu)
                        ctx->ops->restart_dma(ctx);
        } else
                spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

        return ret;
}
EXPORT_SYMBOL_GPL(spufs_handle_class1);