Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')

 -rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 141
 1 file changed, 125 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index c83c3e3f517..f73263ba984 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -34,6 +34,7 @@
 #include <linux/linux_logo.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
+#include <asm/spu_csa.h>
 #include <asm/xmon.h>
 #include <asm/prom.h>
 
@@ -47,6 +48,13 @@ struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
 EXPORT_SYMBOL_GPL(cbe_spu_info);
 
 /*
+ * The spufs fault-handling code needs to call force_sig_info to raise signals
+ * on DMA errors. Export it here to avoid general kernel-wide access to this
+ * function.
+ */
+EXPORT_SYMBOL_GPL(force_sig_info);
+
+/*
  * Protects cbe_spu_info and spu->number.
  */
 static DEFINE_SPINLOCK(spu_lock);
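For context, the spufs side uses this export along the following lines when an MFC DMA error has to be delivered to the owning thread. This is only an illustrative sketch, not the spufs code itself: the function name is hypothetical, while force_sig_info() and the siginfo fields are the standard signal API of this kernel generation.

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/string.h>

/* Hypothetical spufs-side caller: raise SIGBUS for an MFC DMA error.
 * Calls like this are the reason force_sig_info is exported above. */
static void example_raise_dma_error(unsigned long ea)
{
        siginfo_t info;

        memset(&info, 0, sizeof(info));
        info.si_signo = SIGBUS;
        info.si_code = BUS_OBJERR;              /* hardware object error */
        info.si_addr = (void __user *)ea;       /* faulting effective address */

        force_sig_info(info.si_signo, &info, current);
}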
@@ -66,6 +74,10 @@ static LIST_HEAD(spu_full_list);
 static DEFINE_SPINLOCK(spu_full_list_lock);
 static DEFINE_MUTEX(spu_full_list_mutex);
 
+struct spu_slb {
+        u64 esid, vsid;
+};
+
 void spu_invalidate_slbs(struct spu *spu)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -114,6 +126,12 @@ void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(spu_associate_mm);
 
+int spu_64k_pages_available(void)
+{
+        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
+}
+EXPORT_SYMBOL_GPL(spu_64k_pages_available);
+
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
         pr_debug("%s\n", __FUNCTION__);
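The new spu_64k_pages_available() helper reports whether the MMU has a 64k page size configured; an unused entry in mmu_psize_defs[] has a zero shift. Callers can use it to choose a base page size up front. A minimal sketch, with a hypothetical caller name:

/* Hypothetical caller: prefer 64k mappings when the MMU supports
 * them, otherwise fall back to 4k. */
static unsigned int example_pick_psize(void)
{
        return spu_64k_pages_available() ? MMU_PAGE_64K : MMU_PAGE_4K;
}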
@@ -143,11 +161,22 @@ static void spu_restart_dma(struct spu *spu)
                 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
 }
 
-static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+        pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
+                        __func__, slbe, slb->vsid, slb->esid);
+
+        out_be64(&priv2->slb_index_W, slbe);
+        out_be64(&priv2->slb_vsid_RW, slb->vsid);
+        out_be64(&priv2->slb_esid_RW, slb->esid);
+}
+
+static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+{
         struct mm_struct *mm = spu->mm;
-        u64 esid, vsid, llp;
+        struct spu_slb slb;
         int psize;
 
         pr_debug("%s\n", __FUNCTION__);
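Note the write ordering in the new spu_load_slb(): the SLB index register is selected first, the VSID is written next, and the ESID, which carries the SLB_ESID_V valid bit, goes last, so the entry only becomes valid once it is complete. A trivial usage sketch with a hypothetical caller and pre-computed field values:

/* Hypothetical caller: install one pre-computed entry in SLB slot 0.
 * The esid value must include SLB_ESID_V for the entry to take effect. */
static void example_install_slb(struct spu *spu, u64 esid, u64 vsid)
{
        struct spu_slb slb = {
                .esid = esid,
                .vsid = vsid,
        };

        spu_load_slb(spu, 0, &slb);
}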
@@ -159,7 +188,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                 printk("%s: invalid access during switch!\n", __func__);
                 return 1;
         }
-        esid = (ea & ESID_MASK) | SLB_ESID_V;
+        slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
 
         switch(REGION_ID(ea)) {
         case USER_REGION_ID:
@@ -168,21 +197,21 @@
 #else
                 psize = mm->context.user_psize;
 #endif
-                vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-                                SLB_VSID_USER;
+                slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_USER;
                 break;
         case VMALLOC_REGION_ID:
                 if (ea < VMALLOC_END)
                         psize = mmu_vmalloc_psize;
                 else
                         psize = mmu_io_psize;
-                vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-                                SLB_VSID_KERNEL;
+                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                 break;
         case KERNEL_REGION_ID:
                 psize = mmu_linear_psize;
-                vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-                                SLB_VSID_KERNEL;
+                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                 break;
         default:
                 /* Future: support kernel segments so that drivers
@@ -191,11 +220,9 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                 pr_debug("invalid region access at %016lx\n", ea);
                 return 1;
         }
-        llp = mmu_psize_defs[psize].sllp;
+        slb.vsid |= mmu_psize_defs[psize].sllp;
 
-        out_be64(&priv2->slb_index_W, spu->slb_replace);
-        out_be64(&priv2->slb_vsid_RW, vsid | llp);
-        out_be64(&priv2->slb_esid_RW, esid);
+        spu_load_slb(spu, spu->slb_replace, &slb);
 
         spu->slb_replace++;
         if (spu->slb_replace >= 8)
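After this change the fault path only composes a struct spu_slb; the MMIO accesses are delegated to spu_load_slb(), while the replacement policy stays here: slb_replace advances by one slot per fault and wraps back to zero after the eight available SLB entries. The wrap logic in isolation, as a standalone userspace sketch:

#include <stdio.h>

/* Standalone illustration of the round-robin slot selection used by
 * __spu_trap_data_seg(): eight SLB slots, advancing one per fault. */
int main(void)
{
        unsigned int slb_replace = 0;
        int fault;

        for (fault = 0; fault < 10; fault++) {
                printf("fault %2d -> SLB slot %u\n", fault, slb_replace);

                slb_replace++;
                if (slb_replace >= 8)
                        slb_replace = 0;
        }
        return 0;
}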
@@ -232,6 +259,74 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
         return 0;
 }
 
+static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
+{
+        unsigned long ea = (unsigned long)addr;
+        u64 llp;
+
+        if (REGION_ID(ea) == KERNEL_REGION_ID)
+                llp = mmu_psize_defs[mmu_linear_psize].sllp;
+        else
+                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+
+        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+                SLB_VSID_KERNEL | llp;
+        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
+}
+
+/**
+ * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
+ * address @new_addr is present.
+ */
+static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+                void *new_addr)
+{
+        unsigned long ea = (unsigned long)new_addr;
+        int i;
+
+        for (i = 0; i < nr_slbs; i++)
+                if (!((slbs[i].esid ^ ea) & ESID_MASK))
+                        return 1;
+
+        return 0;
+}
+
+/**
+ * Set up the SPU kernel SLBs, in preparation for a context save/restore. We
+ * need to map both the context save area, and the save/restore code.
+ *
+ * Because the lscsa and code may cross segment boundaries, we check to see
+ * if mappings are required for the start and end of each range. We currently
+ * assume that the mappings are smaller than one segment - if not, something
+ * is seriously wrong.
+ */
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+                void *code, int code_size)
+{
+        struct spu_slb slbs[4];
+        int i, nr_slbs = 0;
+        /* start and end addresses of both mappings */
+        void *addrs[] = {
+                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
+                code, code + code_size - 1
+        };
+
+        /* check the set of addresses, and create a new entry in the slbs
+         * array if there isn't already an SLB for that address */
+        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
+                if (__slb_present(slbs, nr_slbs, addrs[i]))
+                        continue;
+
+                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
+                nr_slbs++;
+        }
+
+        /* Add the set of SLBs */
+        for (i = 0; i < nr_slbs; i++)
+                spu_load_slb(spu, i, &slbs[i]);
+}
+EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
+
 static irqreturn_t
 spu_irq_class_0(int irq, void *data)
 {
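The duplicate check in __slb_present() compares only the ESID bits, so the start and end of a range share one entry unless they straddle a 256MB segment boundary; with two ranges and two ends each, the slbs[4] array is always large enough. The masking trick in isolation, as a standalone sketch whose mask value mirrors the kernel's ESID_MASK for 256M segments:

#include <stdint.h>
#include <stdio.h>

/* Mirrors ESID_MASK for 256MB segments: everything above bit 28. */
#define EXAMPLE_ESID_MASK 0xfffffffff0000000ULL

/* Same test as __slb_present(): XOR clears the bits the addresses
 * agree on, so a zero result under the mask means one segment. */
static int same_segment(uint64_t a, uint64_t b)
{
        return !((a ^ b) & EXAMPLE_ESID_MASK);
}

int main(void)
{
        /* illustrative addresses only */
        uint64_t lscsa_start = 0xc000000001230000ULL;
        uint64_t lscsa_end   = 0xc000000001270000ULL;
        uint64_t code_start  = 0xd000000004560000ULL;

        printf("lscsa start/end: %d\n", same_segment(lscsa_start, lscsa_end));
        printf("lscsa/code:      %d\n", same_segment(lscsa_start, code_start));
        return 0;
}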
@@ -479,13 +574,27 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
 int spu_add_sysdev_attr_group(struct attribute_group *attrs)
 {
         struct spu *spu;
+        int rc = 0;
 
         mutex_lock(&spu_full_list_mutex);
-        list_for_each_entry(spu, &spu_full_list, full_list)
-                sysfs_create_group(&spu->sysdev.kobj, attrs);
+        list_for_each_entry(spu, &spu_full_list, full_list) {
+                rc = sysfs_create_group(&spu->sysdev.kobj, attrs);
+
+                /* we're in trouble here, but try unwinding anyway */
+                if (rc) {
+                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
+                                        __func__, attrs->name);
+
+                        list_for_each_entry_continue_reverse(spu,
+                                        &spu_full_list, full_list)
+                                sysfs_remove_group(&spu->sysdev.kobj, attrs);
+                        break;
+                }
+        }
+
         mutex_unlock(&spu_full_list_mutex);
 
-        return 0;
+        return rc;
 }
 EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
 
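The error path above is the usual kernel unwind idiom: list_for_each_entry_continue_reverse() resumes from the entry that failed and walks backwards, undoing the work on every entry that had already succeeded. The pattern in generic form; struct item, do_apply() and do_undo() are hypothetical stand-ins:

#include <linux/list.h>

struct item {
        struct list_head list;
        /* ... */
};

static int do_apply(struct item *it);   /* hypothetical */
static void do_undo(struct item *it);   /* hypothetical */

/* Apply do_apply() to every entry; on failure, undo the entries that
 * already succeeded (the failing entry itself is skipped) and stop. */
static int apply_all(struct list_head *head)
{
        struct item *it;
        int rc = 0;

        list_for_each_entry(it, head, list) {
                rc = do_apply(it);
                if (rc) {
                        list_for_each_entry_continue_reverse(it, head, list)
                                do_undo(it);
                        break;
                }
        }
        return rc;
}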