-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c             20
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c          3
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c      3
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c             4
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c                2
-rw-r--r--  arch/powerpc/platforms/cell/pmu.c                  2
-rw-r--r--  arch/powerpc/platforms/cell/setup.c                2
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c           141
-rw-r--r--  arch/powerpc/platforms/cell/spu_manage.c          15
-rw-r--r--  arch/powerpc/platforms/cell/spufs/lscsa_alloc.c    2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c        42
-rw-r--r--  arch/powerpc/platforms/celleb/iommu.c              2
-rw-r--r--  arch/powerpc/platforms/celleb/setup.c              2
-rw-r--r--  include/asm-powerpc/spu.h                          5
14 files changed, 170 insertions, 75 deletions
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index bb6bff51ce48..13929771bee7 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -61,7 +61,7 @@ static unsigned int spu_cycle_reset;
 #define NUM_THREADS 2         /* number of physical threads in
                                * physical processor
                                */
-#define NUM_TRACE_BUS_WORDS 4
+#define NUM_DEBUG_BUS_WORDS 4
 #define NUM_INPUT_BUS_WORDS 2
 
 #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
@@ -169,7 +169,6 @@ static DEFINE_SPINLOCK(virt_cntr_lock);
 
 static u32 ctr_enabled;
 
-static unsigned char trace_bus[NUM_TRACE_BUS_WORDS];
 static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
 
 /*
@@ -298,7 +297,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
 
         p->signal_group = event / 100;
         p->bus_word = bus_word;
-        p->sub_unit = (unit_mask & 0x0000f000) >> 12;
+        p->sub_unit = GET_SUB_UNIT(unit_mask);
 
         pm_regs.pm07_cntrl[ctr] = 0;
         pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
@@ -334,16 +333,16 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
                 p->bit = signal_bit;
         }
 
-        for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
+        for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
                 if (bus_word & (1 << i)) {
                         pm_regs.debug_bus_control |=
-                                (bus_type << (31 - (2 * i) + 1));
+                                (bus_type << (30 - (2 * i)));
 
                         for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
                                 if (input_bus[j] == 0xff) {
                                         input_bus[j] = i;
                                         pm_regs.group_control |=
-                                                (i << (31 - i));
+                                                (i << (30 - (2 * j)));
 
                                         break;
                                 }
@@ -450,6 +449,12 @@ static void cell_virtual_cntr(unsigned long data)
         hdw_thread = 1 ^ hdw_thread;
         next_hdw_thread = hdw_thread;
 
+        pm_regs.group_control = 0;
+        pm_regs.debug_bus_control = 0;
+
+        for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
+                input_bus[i] = 0xff;
+
         /*
          * There are some per thread events.  Must do the
          * set event, for the thread that is being started
@@ -619,9 +624,6 @@ static int cell_reg_setup(struct op_counter_config *ctr,
                 pmc_cntrl[1][i].vcntr = i;
         }
 
-        for (i = 0; i < NUM_TRACE_BUS_WORDS; i++)
-                trace_bus[i] = 0xff;
-
         for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
                 input_bus[i] = 0xff;
 
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index 13d5a87f13b1..ec7c8f45a215 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -21,8 +21,9 @@
  */
 
 #include <linux/cpufreq.h>
+#include <linux/of_platform.h>
+
 #include <asm/machdep.h>
-#include <asm/of_platform.h>
 #include <asm/prom.h>
 #include <asm/cell-regs.h>
 #include "cbe_cpufreq.h"
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
index 6a2c1b0a9a94..69288f653144 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
@@ -23,7 +23,8 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/timer.h>
-#include <asm/of_platform.h>
+#include <linux/of_platform.h>
+
 #include <asm/processor.h>
 #include <asm/prom.h>
 #include <asm/pmi.h>
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 16a9b07e7b0c..a839c6cf3447 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -9,13 +9,13 @@
 #include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/prom.h>
 #include <asm/ptrace.h>
-#include <asm/of_device.h>
-#include <asm/of_platform.h>
 #include <asm/cell-regs.h>
 
 /*
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b465494cc24c..39fa2149fd02 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -26,13 +26,13 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
+#include <linux/of_platform.h>
 
 #include <asm/prom.h>
 #include <asm/iommu.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/udbg.h>
-#include <asm/of_platform.h>
 #include <asm/lmb.h>
 #include <asm/firmware.h>
 #include <asm/cell-regs.h>
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 1ed303678887..99d688e88cbe 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -213,7 +213,7 @@ u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
                 break;
 
         case pm_interval:
-                READ_SHADOW_REG(val, pm_interval);
+                READ_MMIO_UPPER32(val, pm_interval);
                 break;
 
         case pm_start_stop:
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6a56b6474f52..7f42b7d0adcb 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -30,6 +30,7 @@
 #include <linux/console.h>
 #include <linux/mutex.h>
 #include <linux/memory_hotplug.h>
+#include <linux/of_platform.h>
 
 #include <asm/mmu.h>
 #include <asm/processor.h>
@@ -51,7 +52,6 @@
 #include <asm/spu_priv1.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
-#include <asm/of_platform.h>
 #include <asm/cell-regs.h>
 
 #include "interrupt.h"
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index c83c3e3f5178..f73263ba9841 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -34,6 +34,7 @@
 #include <linux/linux_logo.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
+#include <asm/spu_csa.h>
 #include <asm/xmon.h>
 #include <asm/prom.h>
 
@@ -47,6 +48,13 @@ struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
 EXPORT_SYMBOL_GPL(cbe_spu_info);
 
 /*
+ * The spufs fault-handling code needs to call force_sig_info to raise signals
+ * on DMA errors. Export it here to avoid general kernel-wide access to this
+ * function
+ */
+EXPORT_SYMBOL_GPL(force_sig_info);
+
+/*
  * Protects cbe_spu_info and spu->number.
  */
 static DEFINE_SPINLOCK(spu_lock);
@@ -66,6 +74,10 @@ static LIST_HEAD(spu_full_list);
 static DEFINE_SPINLOCK(spu_full_list_lock);
 static DEFINE_MUTEX(spu_full_list_mutex);
 
+struct spu_slb {
+        u64 esid, vsid;
+};
+
 void spu_invalidate_slbs(struct spu *spu)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -114,6 +126,12 @@ void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(spu_associate_mm);
 
+int spu_64k_pages_available(void)
+{
+        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
+}
+EXPORT_SYMBOL_GPL(spu_64k_pages_available);
+
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
         pr_debug("%s\n", __FUNCTION__);
@@ -143,11 +161,22 @@ static void spu_restart_dma(struct spu *spu)
         out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
 }
 
-static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+        pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
+                        __func__, slbe, slb->vsid, slb->esid);
+
+        out_be64(&priv2->slb_index_W, slbe);
+        out_be64(&priv2->slb_vsid_RW, slb->vsid);
+        out_be64(&priv2->slb_esid_RW, slb->esid);
+}
+
+static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+{
         struct mm_struct *mm = spu->mm;
-        u64 esid, vsid, llp;
+        struct spu_slb slb;
         int psize;
 
         pr_debug("%s\n", __FUNCTION__);
@@ -159,7 +188,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                 printk("%s: invalid access during switch!\n", __func__);
                 return 1;
         }
-        esid = (ea & ESID_MASK) | SLB_ESID_V;
+        slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
 
         switch(REGION_ID(ea)) {
         case USER_REGION_ID:
@@ -168,21 +197,21 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 #else
                 psize = mm->context.user_psize;
 #endif
-                vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-                        SLB_VSID_USER;
+                slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_USER;
                 break;
         case VMALLOC_REGION_ID:
                 if (ea < VMALLOC_END)
                         psize = mmu_vmalloc_psize;
                 else
                         psize = mmu_io_psize;
-                vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-                        SLB_VSID_KERNEL;
+                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                 break;
         case KERNEL_REGION_ID:
                 psize = mmu_linear_psize;
-                vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-                        SLB_VSID_KERNEL;
+                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                 break;
         default:
                 /* Future: support kernel segments so that drivers
@@ -191,11 +220,9 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                 pr_debug("invalid region access at %016lx\n", ea);
                 return 1;
         }
-        llp = mmu_psize_defs[psize].sllp;
+        slb.vsid |= mmu_psize_defs[psize].sllp;
 
-        out_be64(&priv2->slb_index_W, spu->slb_replace);
-        out_be64(&priv2->slb_vsid_RW, vsid | llp);
-        out_be64(&priv2->slb_esid_RW, esid);
+        spu_load_slb(spu, spu->slb_replace, &slb);
 
         spu->slb_replace++;
         if (spu->slb_replace >= 8)
@@ -232,6 +259,74 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
         return 0;
 }
 
+static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
+{
+        unsigned long ea = (unsigned long)addr;
+        u64 llp;
+
+        if (REGION_ID(ea) == KERNEL_REGION_ID)
+                llp = mmu_psize_defs[mmu_linear_psize].sllp;
+        else
+                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+
+        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+                SLB_VSID_KERNEL | llp;
+        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
+}
+
+/**
+ * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
+ * address @new_addr is present.
+ */
+static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+                void *new_addr)
+{
+        unsigned long ea = (unsigned long)new_addr;
+        int i;
+
+        for (i = 0; i < nr_slbs; i++)
+                if (!((slbs[i].esid ^ ea) & ESID_MASK))
+                        return 1;
+
+        return 0;
+}
+
+/**
+ * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
+ * need to map both the context save area, and the save/restore code.
+ *
+ * Because the lscsa and code may cross segment boundaires, we check to see
+ * if mappings are required for the start and end of each range. We currently
+ * assume that the mappings are smaller that one segment - if not, something
+ * is seriously wrong.
+ */
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+                void *code, int code_size)
+{
+        struct spu_slb slbs[4];
+        int i, nr_slbs = 0;
+        /* start and end addresses of both mappings */
+        void *addrs[] = {
+                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
+                code, code + code_size - 1
+        };
+
+        /* check the set of addresses, and create a new entry in the slbs array
+         * if there isn't already a SLB for that address */
+        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
+                if (__slb_present(slbs, nr_slbs, addrs[i]))
+                        continue;
+
+                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
+                nr_slbs++;
+        }
+
+        /* Add the set of SLBs */
+        for (i = 0; i < nr_slbs; i++)
+                spu_load_slb(spu, i, &slbs[i]);
+}
+EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
+
 static irqreturn_t
 spu_irq_class_0(int irq, void *data)
 {
@@ -479,13 +574,27 @@ EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
 int spu_add_sysdev_attr_group(struct attribute_group *attrs)
 {
         struct spu *spu;
+        int rc = 0;
 
         mutex_lock(&spu_full_list_mutex);
-        list_for_each_entry(spu, &spu_full_list, full_list)
-                sysfs_create_group(&spu->sysdev.kobj, attrs);
+        list_for_each_entry(spu, &spu_full_list, full_list) {
+                rc = sysfs_create_group(&spu->sysdev.kobj, attrs);
+
+                /* we're in trouble here, but try unwinding anyway */
+                if (rc) {
+                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
+                                        __func__, attrs->name);
+
+                        list_for_each_entry_continue_reverse(spu,
+                                        &spu_full_list, full_list)
+                                sysfs_remove_group(&spu->sysdev.kobj, attrs);
+                        break;
+                }
+        }
+
         mutex_unlock(&spu_full_list_mutex);
 
-        return 0;
+        return rc;
 }
 EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
 
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 1b010707488d..9979197ff409 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -345,7 +345,7 @@ static int __init of_create_spu(struct spu *spu, void *data)
                 }
                 ret = spu_map_interrupts_old(spu, spe);
                 if (ret) {
-                        printk(KERN_ERR "%s: could not map interrupts",
+                        printk(KERN_ERR "%s: could not map interrupts\n",
                                spu->name);
                         goto out_unmap;
                 }
@@ -411,10 +411,15 @@ static void init_affinity_qs20_harcoded(void)
 
 static int of_has_vicinity(void)
 {
-        struct spu* spu;
+        struct device_node *dn;
 
-        spu = list_first_entry(&cbe_spu_info[0].spus, struct spu, cbe_list);
-        return of_find_property(spu_devnode(spu), "vicinity", NULL) != NULL;
+        for_each_node_by_type(dn, "spe") {
+                if (of_find_property(dn, "vicinity", NULL)) {
+                        of_node_put(dn);
+                        return 1;
+                }
+        }
+        return 0;
 }
 
 static struct spu *devnode_spu(int cbe, struct device_node *dn)
@@ -525,7 +530,7 @@ static int __init init_affinity(void)
                 if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
                         init_affinity_qs20_harcoded();
                 else
-                        printk("No affinity configuration found");
+                        printk("No affinity configuration found\n");
         }
 
         return 0;
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index f4b3c052dabf..d606e575a204 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -73,7 +73,7 @@ int spu_alloc_lscsa(struct spu_state *csa)
         int i, j, n_4k;
 
         /* Check availability of 64K pages */
-        if (mmu_psize_defs[MMU_PAGE_64K].shift == 0)
+        if (!spu_64k_pages_available())
                 goto fail;
 
         csa->use_big_pages = 1;
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 3d64c81cc6e2..8cbc6574820f 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -691,35 +691,9 @@ static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
         out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
 }
 
-static inline void get_kernel_slb(u64 ea, u64 slb[2])
+static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
+                unsigned int *code, int code_size)
 {
-        u64 llp;
-
-        if (REGION_ID(ea) == KERNEL_REGION_ID)
-                llp = mmu_psize_defs[mmu_linear_psize].sllp;
-        else
-                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
-        slb[0] = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
-                SLB_VSID_KERNEL | llp;
-        slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
-}
-
-static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
-{
-        struct spu_priv2 __iomem *priv2 = spu->priv2;
-
-        out_be64(&priv2->slb_index_W, slbe);
-        eieio();
-        out_be64(&priv2->slb_vsid_RW, slb[0]);
-        out_be64(&priv2->slb_esid_RW, slb[1]);
-        eieio();
-}
-
-static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
-{
-        u64 code_slb[2];
-        u64 lscsa_slb[2];
-
         /* Save, Step 47:
          * Restore, Step 30.
          * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
@@ -735,11 +709,7 @@ static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
          * translation is desired by OS environment).
          */
         spu_invalidate_slbs(spu);
-        get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
-        get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
-        load_mfc_slb(spu, code_slb, 0);
-        if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
-                load_mfc_slb(spu, lscsa_slb, 1);
+        spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
 }
 
 static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
@@ -1866,7 +1836,8 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
          */
 
         resume_mfc_queue(prev, spu);        /* Step 46. */
-        setup_mfc_slbs(prev, spu);          /* Step 47. */
+        /* Step 47. */
+        setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
         set_switch_active(prev, spu);       /* Step 48. */
         enable_interrupts(prev, spu);       /* Step 49. */
         save_ls_16kb(prev, spu);            /* Step 50. */
@@ -1971,7 +1942,8 @@ static void restore_lscsa(struct spu_state *next, struct spu *spu)
         setup_spu_status_part1(next, spu);  /* Step 27. */
         setup_spu_status_part2(next, spu);  /* Step 28. */
         restore_mfc_rag(next, spu);         /* Step 29. */
-        setup_mfc_slbs(next, spu);          /* Step 30. */
+        /* Step 30. */
+        setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
         set_spu_npc(next, spu);             /* Step 31. */
         set_signot1(next, spu);             /* Step 32. */
         set_signot2(next, spu);             /* Step 33. */
diff --git a/arch/powerpc/platforms/celleb/iommu.c b/arch/powerpc/platforms/celleb/iommu.c
index 287450a07c41..61df97f4e1a6 100644
--- a/arch/powerpc/platforms/celleb/iommu.c
+++ b/arch/powerpc/platforms/celleb/iommu.c
@@ -22,8 +22,8 @@
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
 #include <linux/pci.h>
+#include <linux/of_platform.h>
 
-#include <asm/of_platform.h>
 #include <asm/firmware.h>
 
 #include "beat_wrapper.h"
diff --git a/arch/powerpc/platforms/celleb/setup.c b/arch/powerpc/platforms/celleb/setup.c
index 5a3f73478f4b..8b03a1bdc79f 100644
--- a/arch/powerpc/platforms/celleb/setup.c
+++ b/arch/powerpc/platforms/celleb/setup.c
@@ -40,6 +40,7 @@
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
 #include <linux/console.h>
+#include <linux/of_platform.h>
 
 #include <asm/mmu.h>
 #include <asm/processor.h>
@@ -52,7 +53,6 @@
 #include <asm/time.h>
 #include <asm/spu_priv1.h>
 #include <asm/firmware.h>
-#include <asm/of_platform.h>
 #include <asm/rtas.h>
 #include <asm/cell-regs.h>
 
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index b1accce77bb5..314aad357d98 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -104,6 +104,7 @@
 
 struct spu_context;
 struct spu_runqueue;
+struct spu_lscsa;
 struct device_node;
 
 enum spu_utilization_state {
@@ -200,6 +201,9 @@ int spu_irq_class_0_bottom(struct spu *spu);
 int spu_irq_class_1_bottom(struct spu *spu);
 void spu_irq_setaffinity(struct spu *spu, int cpu);
 
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+                void *code, int code_size);
+
 #ifdef CONFIG_KEXEC
 void crash_register_spus(struct list_head *list);
 #else
@@ -210,6 +214,7 @@ static inline void crash_register_spus(struct list_head *list)
 
 extern void spu_invalidate_slbs(struct spu *spu);
 extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
+int spu_64k_pages_available(void);
 
 /* Calls from the memory management to the SPU */
 struct mm_struct;