Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/boot/cuboot-bamboo.c            |   1
-rw-r--r--  arch/powerpc/boot/cuboot-ebony.c             |   1
-rw-r--r--  arch/powerpc/boot/cuboot-katmai.c            |   1
-rw-r--r--  arch/powerpc/boot/cuboot-taishan.c           |   2
-rw-r--r--  arch/powerpc/boot/cuboot-warp.c              |   1
-rw-r--r--  arch/powerpc/boot/dts/haleakala.dts          |   2
-rw-r--r--  arch/powerpc/boot/dts/katmai.dts             |  58
-rw-r--r--  arch/powerpc/oprofile/op_model_cell.c        |   2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_common.c |   1
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c          | 151
-rw-r--r--  arch/powerpc/platforms/cell/setup.c          |   7
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c       |  16
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c  |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c     |  12
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c    |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c   |   6
-rw-r--r--  arch/powerpc/platforms/celleb/beat.h         |   3
18 files changed, 171 insertions, 109 deletions
diff --git a/arch/powerpc/boot/cuboot-bamboo.c b/arch/powerpc/boot/cuboot-bamboo.c
index 900c7ff2b7e9..b5c30f766c40 100644
--- a/arch/powerpc/boot/cuboot-bamboo.c
+++ b/arch/powerpc/boot/cuboot-bamboo.c
@@ -17,6 +17,7 @@
 #include "44x.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
diff --git a/arch/powerpc/boot/cuboot-ebony.c b/arch/powerpc/boot/cuboot-ebony.c
index c5f37ce172ea..56564ba37f62 100644
--- a/arch/powerpc/boot/cuboot-ebony.c
+++ b/arch/powerpc/boot/cuboot-ebony.c
@@ -17,6 +17,7 @@
 #include "44x.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
diff --git a/arch/powerpc/boot/cuboot-katmai.c b/arch/powerpc/boot/cuboot-katmai.c
index c021167f9381..5434d70b5660 100644
--- a/arch/powerpc/boot/cuboot-katmai.c
+++ b/arch/powerpc/boot/cuboot-katmai.c
@@ -22,6 +22,7 @@
 #include "44x.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
diff --git a/arch/powerpc/boot/cuboot-taishan.c b/arch/powerpc/boot/cuboot-taishan.c
index f66455a45ab1..b55b80467eed 100644
--- a/arch/powerpc/boot/cuboot-taishan.c
+++ b/arch/powerpc/boot/cuboot-taishan.c
@@ -21,7 +21,9 @@
 #include "dcr.h"
 #include "4xx.h"
 
+#define TARGET_4xx
 #define TARGET_44x
+#define TARGET_440GX
 #include "ppcboot.h"
 
 static bd_t bd;
diff --git a/arch/powerpc/boot/cuboot-warp.c b/arch/powerpc/boot/cuboot-warp.c
index bdedebe1bc14..3db93e85e9ea 100644
--- a/arch/powerpc/boot/cuboot-warp.c
+++ b/arch/powerpc/boot/cuboot-warp.c
@@ -11,6 +11,7 @@
 #include "4xx.h"
 #include "cuboot.h"
 
+#define TARGET_4xx
 #define TARGET_44x
 #include "ppcboot.h"
 
diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts
index 5dd3d15f0feb..ae68fefc01b6 100644
--- a/arch/powerpc/boot/dts/haleakala.dts
+++ b/arch/powerpc/boot/dts/haleakala.dts
@@ -235,7 +235,7 @@
 			#interrupt-cells = <1>;
 			#size-cells = <2>;
 			#address-cells = <3>;
-			compatible = "ibm,plb-pciex-405exr", "ibm,plb-pciex";
+			compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex";
 			primary;
 			port = <0>;	/* port number */
 			reg = <a0000000 20000000	/* Config space access */
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index bc32ac7250ec..fc86e5a3afc4 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -38,8 +38,8 @@
 			timebase-frequency = <0>;	/* Filled in by zImage */
 			i-cache-line-size = <20>;
 			d-cache-line-size = <20>;
-			i-cache-size = <20000>;
-			d-cache-size = <20000>;
+			i-cache-size = <8000>;
+			d-cache-size = <8000>;
 			dcr-controller;
 			dcr-access-method = "native";
 		};
@@ -136,11 +136,11 @@
 		};
 
 		POB0: opb {
 			compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <00000000 4 e0000000 20000000>;
 			clock-frequency = <0>;	/* Filled in by zImage */
 
 			EBC0: ebc {
 				compatible = "ibm,ebc-440spe", "ibm,ebc-440gp", "ibm,ebc";
@@ -153,38 +153,38 @@
 			};
 
 			UART0: serial@10000200 {
 				device_type = "serial";
 				compatible = "ns16550";
 				reg = <10000200 8>;
 				virtual-reg = <a0000200>;
 				clock-frequency = <0>;	/* Filled in by zImage */
 				current-speed = <1c200>;
 				interrupt-parent = <&UIC0>;
 				interrupts = <0 4>;
 			};
 
 			UART1: serial@10000300 {
 				device_type = "serial";
 				compatible = "ns16550";
 				reg = <10000300 8>;
 				virtual-reg = <a0000300>;
 				clock-frequency = <0>;
 				current-speed = <0>;
 				interrupt-parent = <&UIC0>;
 				interrupts = <1 4>;
 			};
 
 
 			UART2: serial@10000600 {
 				device_type = "serial";
 				compatible = "ns16550";
 				reg = <10000600 8>;
 				virtual-reg = <a0000600>;
 				clock-frequency = <0>;
 				current-speed = <0>;
 				interrupt-parent = <&UIC1>;
 				interrupts = <5 4>;
 			};
 
 			IIC0: i2c@10000400 {
 				compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic";
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 13929771bee7..9eed1f68fcab 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -1151,7 +1151,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
 	for (i = 0; i < num_counters; ++i) {
 		if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
 		    && ctr[i].enabled) {
-			oprofile_add_pc(pc, is_kernel, i);
+			oprofile_add_ext_sample(pc, regs, i, is_kernel);
 			cbe_write_ctr(cpu, i, reset_value[i]);
 		}
 	}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 9aa4425d80b2..4d5fd1dbd400 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -199,6 +199,7 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
 
 	return 0;
 }
+EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);
 
 /**
  * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index edab631a8dcb..20ea0e118f24 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -113,7 +113,7 @@
 
 /* IOMMU sizing */
 #define IO_SEGMENT_SHIFT	28
-#define IO_PAGENO_BITS		(IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
+#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))
 
 /* The high bit needs to be set on every DMA address */
 #define SPIDER_DMA_OFFSET	0x80000000ul
@@ -123,7 +123,6 @@ struct iommu_window {
 	struct cbe_iommu *iommu;
 	unsigned long offset;
 	unsigned long size;
-	unsigned long pte_offset;
 	unsigned int ioid;
 	struct iommu_table table;
 };
@@ -200,7 +199,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
 		(window->ioid & IOPTE_IOID_Mask);
 #endif
 
-	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
 	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
 		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
@@ -232,7 +231,7 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
 		| (window->ioid & IOPTE_IOID_Mask);
 #endif
 
-	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
 	for (i = 0; i < npages; i++)
 		io_pte[i] = pte;
@@ -307,76 +306,84 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
 	return -ENODEV;
 }
 
-static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
+static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
 		unsigned long dbase, unsigned long dsize,
 		unsigned long fbase, unsigned long fsize)
 {
 	struct page *page;
-	int i;
-	unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
-		      n_pte_pages, base;
-
-	base = dbase;
-	if (fsize != 0)
-		base = min(fbase, dbase);
+	unsigned long segments, stab_size;
 
 	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
-	pages_per_segment = 1ull << IO_PAGENO_BITS;
 
-	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
-			__FUNCTION__, iommu->nid, segments, pages_per_segment);
+	pr_debug("%s: iommu[%d]: segments: %lu\n",
+			__FUNCTION__, iommu->nid, segments);
 
 	/* set up the segment table */
 	stab_size = segments * sizeof(unsigned long);
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
 	BUG_ON(!page);
 	iommu->stab = page_address(page);
-	clear_page(iommu->stab);
+	memset(iommu->stab, 0, stab_size);
+}
+
+static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+		unsigned long base, unsigned long size, unsigned long gap_base,
+		unsigned long gap_size, unsigned long page_shift)
+{
+	struct page *page;
+	int i;
+	unsigned long reg, segments, pages_per_segment, ptab_size,
+		      n_pte_pages, start_seg, *ptab;
+
+	start_seg = base >> IO_SEGMENT_SHIFT;
+	segments  = size >> IO_SEGMENT_SHIFT;
+	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
+	/* PTEs for each segment must start on a 4K boundary */
+	pages_per_segment = max(pages_per_segment,
+				(1 << 12) / sizeof(unsigned long));
 
-	/* ... and the page tables. Since these are contiguous, we can treat
-	 * the page tables as one array of ptes, like pSeries does.
-	 */
 	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
 	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
 			iommu->nid, ptab_size, get_order(ptab_size));
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
 	BUG_ON(!page);
 
-	iommu->ptab = page_address(page);
-	memset(iommu->ptab, 0, ptab_size);
+	ptab = page_address(page);
+	memset(ptab, 0, ptab_size);
 
-	/* allocate a bogus page for the end of each mapping */
-	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
-	BUG_ON(!page);
-	iommu->pad_page = page_address(page);
-	clear_page(iommu->pad_page);
-
-	/* number of pages needed for a page table */
-	n_pte_pages = (pages_per_segment *
-		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+	/* number of 4K pages needed for a page table */
+	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
 
 	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
-			__FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+			__FUNCTION__, iommu->nid, iommu->stab, ptab,
 			n_pte_pages);
 
 	/* initialise the STEs */
 	reg = IOSTE_V | ((n_pte_pages - 1) << 5);
 
-	if (IOMMU_PAGE_SIZE == 0x1000)
-		reg |= IOSTE_PS_4K;
-	else if (IOMMU_PAGE_SIZE == 0x10000)
-		reg |= IOSTE_PS_64K;
-	else {
-		extern void __unknown_page_size_error(void);
-		__unknown_page_size_error();
+	switch (page_shift) {
+	case 12: reg |= IOSTE_PS_4K;  break;
+	case 16: reg |= IOSTE_PS_64K; break;
+	case 20: reg |= IOSTE_PS_1M;  break;
+	case 24: reg |= IOSTE_PS_16M; break;
+	default: BUG();
 	}
 
+	gap_base = gap_base >> IO_SEGMENT_SHIFT;
+	gap_size = gap_size >> IO_SEGMENT_SHIFT;
+
 	pr_debug("Setting up IOMMU stab:\n");
-	for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
-		iommu->stab[i] = reg |
-			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+	for (i = start_seg; i < (start_seg + segments); i++) {
+		if (i >= gap_base && i < (gap_base + gap_size)) {
+			pr_debug("\toverlap at %d, skipping\n", i);
+			continue;
+		}
+		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
+					(i - start_seg));
 		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
 	}
+
+	return ptab;
 }
 
 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
@@ -423,7 +430,9 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
 	unsigned long base, unsigned long size)
 {
-	cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
+	cell_iommu_setup_stab(iommu, base, size, 0, 0);
+	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
+					    IOMMU_PAGE_SHIFT);
 	cell_iommu_enable_hardware(iommu);
 }
 
@@ -464,6 +473,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 		  unsigned long pte_offset)
 {
 	struct iommu_window *window;
+	struct page *page;
 	u32 ioid;
 
 	ioid = cell_iommu_get_ioid(np);
@@ -475,13 +485,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	window->size = size;
 	window->ioid = ioid;
 	window->iommu = iommu;
-	window->pte_offset = pte_offset;
 
 	window->table.it_blocksize = 16;
 	window->table.it_base = (unsigned long)iommu->ptab;
 	window->table.it_index = iommu->nid;
-	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
-		window->pte_offset;
+	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
 	window->table.it_size = size >> IOMMU_PAGE_SHIFT;
 
 	iommu_init_table(&window->table, iommu->nid);
@@ -504,6 +512,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	 * This code also assumes that we have a window that starts at 0,
 	 * which is the case on all spider based blades.
 	 */
+	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+	BUG_ON(!page);
+	iommu->pad_page = page_address(page);
+	clear_page(iommu->pad_page);
+
 	__set_bit(0, window->table.it_map);
 	tce_build_cell(&window->table, window->table.it_offset, 1,
 			(unsigned long)iommu->pad_page, DMA_TO_DEVICE);
@@ -549,7 +562,7 @@ static void cell_dma_dev_setup_iommu(struct device *dev)
 	archdata->dma_data = &window->table;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev);
+static void cell_dma_dev_setup_fixed(struct device *dev);
 
 static void cell_dma_dev_setup(struct device *dev)
 {
@@ -557,7 +570,7 @@ static void cell_dma_dev_setup(struct device *dev)
 
 	/* Order is important here, these are not mutually exclusive */
 	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
-		cell_dma_dev_setup_static(dev);
+		cell_dma_dev_setup_fixed(dev);
 	else if (get_pci_dma_ops() == &dma_iommu_ops)
 		cell_dma_dev_setup_iommu(dev);
 	else if (get_pci_dma_ops() == &dma_direct_ops)
@@ -858,7 +871,7 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev)
+static void cell_dma_dev_setup_fixed(struct device *dev)
 {
 	struct dev_archdata *archdata = &dev->archdata;
 	u64 addr;
@@ -869,35 +882,45 @@ static void cell_dma_dev_setup_static(struct device *dev)
 	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
 }
 
+static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+			   unsigned long base_pte)
+{
+	unsigned long segment, offset;
+
+	segment = addr >> IO_SEGMENT_SHIFT;
+	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
+	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
+
+	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
+		  addr, ptab, segment, offset);
+
+	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+}
+
 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	struct device_node *np, unsigned long dbase, unsigned long dsize,
 	unsigned long fbase, unsigned long fsize)
 {
-	unsigned long base_pte, uaddr, *io_pte;
-	int i;
+	unsigned long base_pte, uaddr, ioaddr, *ptab;
 
-	dma_iommu_fixed_base = fbase;
+	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
 
-	/* convert from bytes into page table indices */
-	dbase = dbase >> IOMMU_PAGE_SHIFT;
-	dsize = dsize >> IOMMU_PAGE_SHIFT;
-	fbase = fbase >> IOMMU_PAGE_SHIFT;
-	fsize = fsize >> IOMMU_PAGE_SHIFT;
+	dma_iommu_fixed_base = fbase;
 
 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
 
-	io_pte = iommu->ptab;
 	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
 		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
 
-	uaddr = 0;
-	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
+	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
 		/* Don't touch the dynamic region */
-		if (i >= dbase && i < (dbase + dsize)) {
-			pr_debug("iommu: static/dynamic overlap, skipping\n");
+		ioaddr = uaddr + fbase;
+		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
+			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
 			continue;
 		}
-		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+		insert_16M_pte(uaddr, ptab, base_pte);
 	}
 
 	mb();
@@ -995,7 +1018,9 @@ static int __init cell_iommu_fixed_mapping_init(void)
995 "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, 1018 "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
996 dbase + dsize, fbase, fbase + fsize); 1019 dbase + dsize, fbase, fbase + fsize);
997 1020
998 cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize); 1021 cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
1022 iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
1023 IOMMU_PAGE_SHIFT);
999 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, 1024 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
1000 fbase, fsize); 1025 fbase, fsize);
1001 cell_iommu_enable_hardware(iommu); 1026 cell_iommu_enable_hardware(iommu);
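
For readers checking the arithmetic in the iommu.c hunks above, here is a small stand-alone sketch (not part of the patch) of the per-segment sizing that the reworked cell_iommu_alloc_ptab() performs: pages per 256MB I/O segment come from IO_PAGENO_BITS(page_shift), clamped so each segment's PTEs fill at least one 4K page, and each segment table entry then covers a whole number of 4K ptab pages. It assumes 8-byte PTEs (sizeof(unsigned long) on 64-bit), as in the driver.

/* Illustration only: reproduce the sizing math from the patch in user space
 * so the numbers can be checked. IO_SEGMENT_SHIFT = 28 (256MB segments). */
#include <stdio.h>

#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))

int main(void)
{
	/* 12 = 4K pages (dynamic window), 24 = 16M pages (fixed window) */
	unsigned long shifts[] = { 12, 16, 20, 24 };
	unsigned long i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
		unsigned long shift = shifts[i];
		unsigned long pages_per_segment = 1ul << IO_PAGENO_BITS(shift);
		unsigned long n_pte_pages;

		/* PTEs for each segment must start on a 4K boundary */
		if (pages_per_segment < (1 << 12) / sizeof(unsigned long))
			pages_per_segment = (1 << 12) / sizeof(unsigned long);

		/* number of 4K pages holding one segment's PTEs */
		n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

		printf("page shift %2lu: %8lu PTEs/segment, %4lu 4K page(s) of ptab\n",
		       shift, pages_per_segment, n_pte_pages);
	}
	return 0;
}

Running it shows, for example, 65536 PTEs per segment (128 ptab pages) for 4K I/O pages and the 4K-alignment floor of 512 PTEs (one ptab page) for 16M I/O pages, which matches the (n_pte_pages - 1) << 5 encoding written into each STE.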
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index a7f609b3b876..dda34650cb07 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -149,6 +149,11 @@ static void __init cell_init_irq(void)
 	mpic_init_IRQ();
 }
 
+static void __init cell_set_dabrx(void)
+{
+	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
+}
+
 static void __init cell_setup_arch(void)
 {
 #ifdef CONFIG_SPU_BASE
@@ -158,6 +163,8 @@ static void __init cell_setup_arch(void)
 
 	cbe_regs_init();
 
+	cell_set_dabrx();
+
 #ifdef CONFIG_CBE_RAS
 	cbe_ras_init();
 #endif
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 87eb07f94c5f..712001f6b7da 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	unsigned long flags;
 
+	spin_lock_irqsave(&spu->register_lock, flags);
 	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
 		out_be64(&priv2->slb_invalidate_all_W, 0UL);
+	spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
 
@@ -148,7 +151,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 			__func__, slbe, slb->vsid, slb->esid);
 
 	out_be64(&priv2->slb_index_W, slbe);
+	/* set invalid before writing vsid */
+	out_be64(&priv2->slb_esid_RW, 0);
+	/* now it's safe to write the vsid */
 	out_be64(&priv2->slb_vsid_RW, slb->vsid);
+	/* setting the new esid makes the entry valid again */
 	out_be64(&priv2->slb_esid_RW, slb->esid);
 }
 
@@ -290,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		nr_slbs++;
 	}
 
+	spin_lock_irq(&spu->register_lock);
 	/* Add the set of SLBs */
 	for (i = 0; i < nr_slbs; i++)
 		spu_load_slb(spu, i, &slbs[i]);
+	spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
 
@@ -337,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
-	spin_unlock(&spu->register_lock);
-	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-			dar, dsisr);
 
 	if (stat & CLASS1_SEGMENT_FAULT_INTR)
 		__spu_trap_data_seg(spu, dar);
 
+	spin_unlock(&spu->register_lock);
+	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+			dar, dsisr);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
 
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 133995ed5cc7..cf6c2c89211d 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx)
 
 	/*
 	 * This is basically an open-coded spu_acquire_saved, except that
-	 * we don't acquire the state mutex interruptible.
+	 * we don't acquire the state mutex interruptible, and we don't
+	 * want this context to be rescheduled on release.
 	 */
 	mutex_lock(&ctx->state_mutex);
-	if (ctx->state != SPU_STATE_SAVED) {
-		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
-	}
 
 	mm = ctx->owner;
 	ctx->owner = NULL;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c66c3756970d..f7a7e8635fb6 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -367,6 +367,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 		return NOPFN_SIGBUS;
 
 	/*
+	 * Because we release the mmap_sem, the context may be destroyed while
+	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
+	 * in the meantime.
+	 */
+	get_spu_context(ctx);
+
+	/*
 	 * We have to wait for context to be loaded before we have
 	 * pages to hand out to the user, but we don't want to wait
 	 * with the mmap_sem held.
@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	 * hanged.
 	 */
 	if (spu_acquire(ctx))
-		return NOPFN_REFAULT;
+		goto refault;
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		up_read(&current->mm->mmap_sem);
@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 
 	if (!ret)
 		spu_release(ctx);
+
+refault:
+	put_spu_context(ctx);
 	return NOPFN_REFAULT;
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3a5972117de7..5d5f680cd0b8 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
 
-	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+	spuctx_switch_state(ctx, SPU_UTIL_USER);
 }
 
 /*
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 01974f7776e1..79aa773f3c99 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n)
 		ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
 
 	return snprintf(tbuf, n,
-		"[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+		"[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n",
 		(unsigned long) tv.tv_sec,
 		(unsigned long) tv.tv_nsec,
-		t->owner_tid,
-		t->name,
 		t->curr_tid,
+		t->name,
+		t->owner_tid,
 		t->number);
 }
 
@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = {
188 { "spufs_ps_nopfn__insert", "%p %p", spu_context_event }, 188 { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
189 { "spu_acquire_saved__enter", "%p", spu_context_nospu_event }, 189 { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
190 { "destroy_spu_context__enter", "%p", spu_context_nospu_event }, 190 { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
191 { "spufs_stop_callback__enter", "%p %p", spu_context_event },
191}; 192};
192 193
193static int __init sputrace_init(void) 194static int __init sputrace_init(void)
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6f5886c7b1f9..e9dc7a55d1b9 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -34,6 +34,7 @@
 
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	 * Write INT_MASK_class1 with value of 0.
 	 * Save INT_Mask_class2 in CSA.
 	 * Write INT_MASK_class2 with value of 0.
+	 * Synchronize all three interrupts to be sure
+	 * we no longer execute a handler on another CPU.
 	 */
 	spin_lock_irq(&spu->register_lock);
 	if (csa) {
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+	synchronize_irq(spu->irqs[0]);
+	synchronize_irq(spu->irqs[1]);
+	synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
diff --git a/arch/powerpc/platforms/celleb/beat.h b/arch/powerpc/platforms/celleb/beat.h
index b2e292df13ca..ac82ac35b991 100644
--- a/arch/powerpc/platforms/celleb/beat.h
+++ b/arch/powerpc/platforms/celleb/beat.h
@@ -21,9 +21,6 @@
 #ifndef _CELLEB_BEAT_H
 #define _CELLEB_BEAT_H
 
-#define DABRX_KERNEL		(1UL<<1)
-#define DABRX_USER		(1UL<<0)
-
 int64_t beat_get_term_char(uint64_t,uint64_t*,uint64_t*,uint64_t*);
 int64_t beat_put_term_char(uint64_t,uint64_t,uint64_t,uint64_t);
 int64_t beat_repository_encode(int, const char *, uint64_t[4]);