Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                          |   3
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c             |   3
-rw-r--r--  arch/powerpc/kernel/dma_64.c                  |   8
-rw-r--r--  arch/powerpc/kernel/iommu.c                   |  93
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c           |   3
-rw-r--r--  arch/powerpc/kernel/vio.c                     |   2
-rw-r--r--  arch/powerpc/mm/mem.c                         |   7
-rw-r--r--  arch/powerpc/mm/pgtable_32.c                  |   6
-rw-r--r--  arch/powerpc/platforms/82xx/mpc8272_ads.c     |   3
-rw-r--r--  arch/powerpc/platforms/82xx/pq2fads.c         |   3
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig           |   7
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c        |  99
-rw-r--r--  arch/powerpc/platforms/cell/setup.c           |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile    |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c      |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c     |  29
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c       |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     |  28
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h     |   5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c  | 250
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c        |   4
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c     |   1
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c              |  10
23 files changed, 434 insertions, 147 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b94d4502a477..cf030b004415 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -256,6 +256,9 @@ config IOMMU_VMERGE
 
           Most drivers don't have this problem; it is safe to say Y here.
 
+config IOMMU_HELPER
+        def_bool PPC64
+
 config HOTPLUG_CPU
         bool "Support for enabling/disabling CPUs"
         depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ed083feaf6f9..e6e49289f788 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -22,6 +22,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/suspend.h>
+#include <linux/hrtimer.h>
 #ifdef CONFIG_PPC64
 #include <linux/time.h>
 #include <linux/hardirq.h>
@@ -312,7 +313,7 @@ int main(void)
         DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
         DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
         DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
-        DEFINE(CLOCK_REALTIME_RES, TICK_NSEC);
+        DEFINE(CLOCK_REALTIME_RES, (KTIME_MONOTONIC_RES).tv64);
 
 #ifdef CONFIG_BUG
         DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 84239076a5b8..3a317cb0636a 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -31,8 +31,8 @@ static inline unsigned long device_to_mask(struct device *dev)
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
 {
-        return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
-                                    device_to_mask(dev), flag,
+        return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+                                    dma_handle, device_to_mask(dev), flag,
                                     dev->archdata.numa_node);
 }
 
@@ -52,7 +52,7 @@ static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
                                        size_t size,
                                        enum dma_data_direction direction)
 {
-        return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+        return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
                                 device_to_mask(dev), direction);
 }
 
@@ -68,7 +68,7 @@ static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
 static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                             int nelems, enum dma_data_direction direction)
 {
-        return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+        return iommu_map_sg(dev, sglist, nelems,
                             device_to_mask(dev), direction);
 }
 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a3c406aca664..8f1f4e539c4b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -31,6 +31,7 @@
 #include <linux/string.h>
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
+#include <linux/iommu-helper.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
 __setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+                                       struct iommu_table *tbl,
                                        unsigned long npages,
                                        unsigned long *handle,
                                        unsigned long mask,
                                        unsigned int align_order)
 {
-        unsigned long n, end, i, start;
+        unsigned long n, end, start;
         unsigned long limit;
         int largealloc = npages > 15;
         int pass = 0;
         unsigned long align_mask;
+        unsigned long boundary_size;
 
         align_mask = 0xffffffffffffffffl >> (64 - align_order);
 
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                 start &= mask;
         }
 
-        n = find_next_zero_bit(tbl->it_map, limit, start);
-
-        /* Align allocation */
-        n = (n + align_mask) & ~align_mask;
-
-        end = n + npages;
-
-        if (unlikely(end >= limit)) {
+        if (dev)
+                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+                                      1 << IOMMU_PAGE_SHIFT);
+        else
+                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
+
+        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+                             align_mask);
+        if (n == -1) {
                 if (likely(pass < 2)) {
                         /* First failure, just rescan the half of the table.
                          * Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                 }
         }
 
-        for (i = n; i < end; i++)
-                if (test_bit(i, tbl->it_map)) {
-                        start = i+1;
-                        goto again;
-                }
-
-        for (i = n; i < end; i++)
-                __set_bit(i, tbl->it_map);
+        end = n + npages;
 
         /* Bump the hint to a new block for small allocs. */
         if (largealloc) {
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
         return n;
 }
 
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
-                unsigned int npages, enum dma_data_direction direction,
-                unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+                              void *page, unsigned int npages,
+                              enum dma_data_direction direction,
+                              unsigned long mask, unsigned int align_order)
 {
         unsigned long entry, flags;
         dma_addr_t ret = DMA_ERROR_CODE;
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
-        entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
         if (unlikely(entry == DMA_ERROR_CODE)) {
                 spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                          unsigned int npages)
 {
         unsigned long entry, free_entry;
-        unsigned long i;
 
         entry = dma_addr >> IOMMU_PAGE_SHIFT;
         free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
         }
 
         ppc_md.tce_free(tbl, entry, npages);
-
-        for (i = 0; i < npages; i++)
-                __clear_bit(free_entry+i, tbl->it_map);
+        iommu_area_free(tbl->it_map, free_entry, npages);
 }
 
 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -270,16 +267,18 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
         spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
-int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                  int nelems, unsigned long mask,
                  enum dma_data_direction direction)
 {
+        struct iommu_table *tbl = dev->archdata.dma_data;
         dma_addr_t dma_next = 0, dma_addr;
         unsigned long flags;
         struct scatterlist *s, *outs, *segstart;
         int outcount, incount, i;
         unsigned int align;
         unsigned long handle;
+        unsigned int max_seg_size;
 
         BUG_ON(direction == DMA_NONE);
 
@@ -298,6 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
         spin_lock_irqsave(&(tbl->it_lock), flags);
 
+        max_seg_size = dma_get_max_seg_size(dev);
         for_each_sg(sglist, s, nelems, i) {
                 unsigned long vaddr, npages, entry, slen;
 
@@ -314,7 +314,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                     (vaddr & ~PAGE_MASK) == 0)
                         align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
-                entry = iommu_range_alloc(tbl, npages, &handle,
+                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                           mask >> IOMMU_PAGE_SHIFT, align);
 
                 DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -344,7 +344,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 /* We cannot merge if:
                  * - allocated dma_addr isn't contiguous to previous allocation
                  */
-                if (novmerge || (dma_addr != dma_next)) {
+                if (novmerge || (dma_addr != dma_next) ||
+                    (outs->dma_length + s->length > max_seg_size)) {
                         /* Can't merge: create a new segment */
                         segstart = s;
                         outcount++;
@@ -452,9 +453,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
         unsigned long sz;
-        unsigned long start_index, end_index;
-        unsigned long entries_per_4g;
-        unsigned long index;
         static int welcomed = 0;
         struct page *page;
 
@@ -476,6 +474,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 
 #ifdef CONFIG_CRASH_DUMP
         if (ppc_md.tce_get) {
+                unsigned long index;
                 unsigned long tceval;
                 unsigned long tcecount = 0;
 
@@ -506,23 +505,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
                 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 #endif
 
-        /*
-         * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
-         * GB chunk as reserved.
-         */
-        if (protect4gb) {
-                entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
-
-                /* Mark the last bit before a 4GB boundary as used */
-                start_index = tbl->it_offset | (entries_per_4g - 1);
-                start_index -= tbl->it_offset;
-
-                end_index = tbl->it_size;
-
-                for (index = start_index; index < end_index - 1; index += entries_per_4g)
-                        __set_bit(index, tbl->it_map);
-        }
-
         if (!welcomed) {
                 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                        novmerge ? "disabled" : "enabled");
@@ -570,9 +552,9 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
  * need not be page aligned, the dma_addr_t returned will point to the same
  * byte within the page as vaddr.
  */
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-                size_t size, unsigned long mask,
-                enum dma_data_direction direction)
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+                            void *vaddr, size_t size, unsigned long mask,
+                            enum dma_data_direction direction)
 {
         dma_addr_t dma_handle = DMA_ERROR_CODE;
         unsigned long uaddr;
@@ -589,7 +571,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
             ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                 align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 
-        dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+        dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                  mask >> IOMMU_PAGE_SHIFT, align);
         if (dma_handle == DMA_ERROR_CODE) {
                 if (printk_ratelimit()) {
@@ -621,8 +603,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
  */
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-                dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+                           size_t size, dma_addr_t *dma_handle,
+                           unsigned long mask, gfp_t flag, int node)
 {
         void *ret = NULL;
         dma_addr_t mapping;
@@ -656,7 +639,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
         /* Set up tces to cover the allocated range */
         nio_pages = size >> IOMMU_PAGE_SHIFT;
         io_order = get_iommu_order(size);
-        mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                               mask >> IOMMU_PAGE_SHIFT, io_order);
         if (mapping == DMA_ERROR_CODE) {
                 free_pages((unsigned long)ret, order);
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 76b862bd1fe9..61dd17449ddc 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -36,7 +36,8 @@ static struct legacy_serial_info {
 static struct __initdata of_device_id parents[] = {
         {.type = "soc",},
         {.type = "tsi-bridge",},
-        {.type = "opb", .compatible = "ibm,opb",},
+        {.type = "opb", },
+        {.compatible = "ibm,opb",},
         {.compatible = "simple-bus",},
         {.compatible = "wrs,epld-localbus",},
 };
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f0bad7070fb5..f98867252ee2 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -176,7 +176,7 @@ static void __devinit vio_dev_release(struct device *dev)
  * Returns a pointer to the created vio_dev or NULL if node has
  * NULL device_type or compatible fields.
  */
-struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
+struct vio_dev *vio_register_device_node(struct device_node *of_node)
 {
         struct vio_dev *viodev;
         const unsigned int *unit_address;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e8122447f019..c7d7bd43a251 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -483,7 +483,12 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
          */
         _tlbie(address, 0 /* 8xx doesn't care about PID */);
 #endif
-        if (!PageReserved(page)
+        /* The _PAGE_USER test should really be _PAGE_EXEC, but
+         * older glibc versions execute some code from no-exec
+         * pages, which for now we are supporting.  If exec-only
+         * pages are ever implemented, this will have to change.
+         */
+        if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
             && !test_bit(PG_arch_1, &page->flags)) {
                 if (vma->vm_mm == current->active_mm) {
                         __flush_dcache_icache((void *) address);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 64488723162a..f80f90c4d58b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -86,7 +86,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
         return ret;
 }
 
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
         free_pages((unsigned long)pgd, PGDIR_ORDER);
 }
@@ -123,7 +123,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
         return ptepage;
 }
 
-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
 #ifdef CONFIG_SMP
         hash_page_sync();
@@ -131,7 +131,7 @@ void pte_free_kernel(pte_t *pte)
         free_page((unsigned long)pte);
 }
 
-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
 {
 #ifdef CONFIG_SMP
         hash_page_sync();
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 3fce6b375dbc..7d3018751988 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -134,13 +134,12 @@ static void __init mpc8272_ads_setup_arch(void)
         }
 
         bcsr = of_iomap(np, 0);
+        of_node_put(np);
         if (!bcsr) {
                 printk(KERN_ERR "Cannot map BCSR registers\n");
                 return;
         }
 
-        of_node_put(np);
-
         clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN);
         setbits32(&bcsr[1], BCSR1_FETH_RST);
 
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index 68196e349994..e1dceeec4994 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -130,13 +130,12 @@ static void __init pq2fads_setup_arch(void)
         }
 
         bcsr = of_iomap(np, 0);
+        of_node_put(np);
         if (!bcsr) {
                 printk(KERN_ERR "Cannot map BCSR registers\n");
                 return;
         }
 
-        of_node_put(np);
-
         /* Enable the serial and ethernet ports */
 
         clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN);
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 3a963b4a9be0..2f169991896d 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -54,6 +54,13 @@ config SPU_FS_64K_LS
           uses 4K pages. This can improve performances of applications
           using multiple SPEs by lowering the TLB pressure on them.
 
+config SPU_TRACE
+        tristate "SPU event tracing support"
+        depends on SPU_FS && MARKERS
+        help
+          This option allows reading a trace of spu-related events through
+          the sputrace file in procfs.
+
 config SPU_BASE
         bool
         default n
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 095988f13bf4..d95e71dee91f 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -13,7 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/msi.h>
-#include <linux/reboot.h>
+#include <linux/of_platform.h>
 
 #include <asm/dcr.h>
 #include <asm/machdep.h>
@@ -65,14 +65,12 @@
 
 struct axon_msic {
         struct irq_host *irq_host;
-        __le32 *fifo;
+        __le32 *fifo_virt;
+        dma_addr_t fifo_phys;
         dcr_host_t dcr_host;
-        struct list_head list;
         u32 read_offset;
 };
 
-static LIST_HEAD(axon_msic_list);
-
 static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
 {
         pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
@@ -94,7 +92,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 
         while (msic->read_offset != write_offset) {
                 idx = msic->read_offset / sizeof(__le32);
-                msi = le32_to_cpu(msic->fifo[idx]);
+                msi = le32_to_cpu(msic->fifo_virt[idx]);
                 msi &= 0xFFFF;
 
                 pr_debug("axon_msi: woff %x roff %x msi %x\n",
@@ -139,6 +137,7 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
 
                 tmp = dn;
                 dn = of_find_node_by_phandle(*ph);
+                of_node_put(tmp);
                 if (!dn) {
                         dev_dbg(&dev->dev,
                                 "axon_msi: msi-translator doesn't point to a node\n");
@@ -156,7 +155,6 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
 
 out_error:
         of_node_put(dn);
-        of_node_put(tmp);
 
         return msic;
 }
@@ -292,30 +290,24 @@ static struct irq_host_ops msic_host_ops = {
         .map = msic_host_map,
 };
 
-static int axon_msi_notify_reboot(struct notifier_block *nb,
-                                  unsigned long code, void *data)
+static int axon_msi_shutdown(struct of_device *device)
 {
-        struct axon_msic *msic;
+        struct axon_msic *msic = device->dev.platform_data;
         u32 tmp;
 
-        list_for_each_entry(msic, &axon_msic_list, list) {
-                pr_debug("axon_msi: disabling %s\n",
-                         msic->irq_host->of_node->full_name);
-                tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
-                tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
-                msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
-        }
+        pr_debug("axon_msi: disabling %s\n",
+                 msic->irq_host->of_node->full_name);
+        tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
+        tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
+        msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
 
         return 0;
 }
 
-static struct notifier_block axon_msi_reboot_notifier = {
-        .notifier_call = axon_msi_notify_reboot
-};
-
-static int axon_msi_setup_one(struct device_node *dn)
+static int axon_msi_probe(struct of_device *device,
+                          const struct of_device_id *device_id)
 {
-        struct page *page;
+        struct device_node *dn = device->node;
         struct axon_msic *msic;
         unsigned int virq;
         int dcr_base, dcr_len;
@@ -346,16 +338,14 @@ static int axon_msi_setup_one(struct device_node *dn)
                 goto out_free_msic;
         }
 
-        page = alloc_pages_node(of_node_to_nid(dn), GFP_KERNEL,
-                                get_order(MSIC_FIFO_SIZE_BYTES));
-        if (!page) {
+        msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
+                                             &msic->fifo_phys, GFP_KERNEL);
+        if (!msic->fifo_virt) {
                 printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
                        dn->full_name);
                 goto out_free_msic;
         }
 
-        msic->fifo = page_address(page);
-
         msic->irq_host = irq_alloc_host(of_node_get(dn), IRQ_HOST_MAP_NOMAP,
                                         NR_IRQS, &msic_host_ops, 0);
         if (!msic->irq_host) {
@@ -378,14 +368,18 @@ static int axon_msi_setup_one(struct device_node *dn)
         pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);
 
         /* Enable the MSIC hardware */
-        msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, (u64)msic->fifo >> 32);
+        msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
         msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
-                       (u64)msic->fifo & 0xFFFFFFFF);
+                       msic->fifo_phys & 0xFFFFFFFF);
         msic_dcr_write(msic, MSIC_CTRL_REG,
                        MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
                        MSIC_CTRL_FIFO_SIZE);
 
-        list_add(&msic->list, &axon_msic_list);
+        device->dev.platform_data = msic;
+
+        ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
+        ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
+        ppc_md.msi_check_device = axon_msi_check_device;
 
         printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
 
@@ -394,7 +388,8 @@ static int axon_msi_setup_one(struct device_node *dn)
 out_free_host:
         kfree(msic->irq_host);
 out_free_fifo:
-        __free_pages(virt_to_page(msic->fifo), get_order(MSIC_FIFO_SIZE_BYTES));
+        dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
+                          msic->fifo_phys);
 out_free_msic:
         kfree(msic);
 out:
@@ -402,28 +397,24 @@ out:
         return -1;
 }
 
-static int axon_msi_init(void)
-{
-        struct device_node *dn;
-        int found = 0;
-
-        pr_debug("axon_msi: initialising ...\n");
-
-        for_each_compatible_node(dn, NULL, "ibm,axon-msic") {
-                if (axon_msi_setup_one(dn) == 0)
-                        found++;
-        }
-
-        if (found) {
-                ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
-                ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
-                ppc_md.msi_check_device = axon_msi_check_device;
-
-                register_reboot_notifier(&axon_msi_reboot_notifier);
-
-                pr_debug("axon_msi: registered callbacks!\n");
-        }
+static const struct of_device_id axon_msi_device_id[] = {
+        {
+                .compatible = "ibm,axon-msic"
+        },
+        {}
+};
+
+static struct of_platform_driver axon_msi_driver = {
+        .match_table = axon_msi_device_id,
+        .probe = axon_msi_probe,
+        .shutdown = axon_msi_shutdown,
+        .driver = {
+                .name = "axon-msi"
+        },
+};
 
-        return 0;
+static int __init axon_msi_init(void)
+{
+        return of_register_platform_driver(&axon_msi_driver);
 }
-arch_initcall(axon_msi_init);
+subsys_initcall(axon_msi_init);
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index e6534b519c9a..a7f609b3b876 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -98,7 +98,7 @@ static int __init cell_publish_devices(void)
         }
         return 0;
 }
-machine_device_initcall(cell, cell_publish_devices);
+machine_subsys_initcall(cell, cell_publish_devices);
 
 static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index d3a349fb42e5..99610a6361f2 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -4,6 +4,8 @@ spufs-y += inode.o file.o context.o syscalls.o coredump.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 spufs-y += switch.o fault.o lscsa_alloc.o
 
+obj-$(CONFIG_SPU_TRACE) += sputrace.o
+
 # Rules to build switch.o with the help of SPU tool chain
 SPU_CROSS := spu-
 SPU_CC := $(SPU_CROSS)gcc
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 3fcd06418b01..1018acd1746b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -29,6 +29,7 @@
 #include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/seq_file.h>
+#include <linux/marker.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>
@@ -358,6 +359,8 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
         struct spu_context *ctx = vma->vm_file->private_data;
         unsigned long area, offset = address - vma->vm_start;
 
+        spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);
+
         offset += vma->vm_pgoff << PAGE_SHIFT;
         if (offset >= ps_size)
                 return NOPFN_SIGBUS;
@@ -375,11 +378,14 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 
         if (ctx->state == SPU_STATE_SAVED) {
                 up_read(&current->mm->mmap_sem);
+                spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
                 spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+                spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
                 down_read(&current->mm->mmap_sem);
         } else {
                 area = ctx->spu->problem_phys + ps_offs;
                 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+                spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
         }
 
         spu_release(ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index c0e968a4c211..90784c029f25 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -322,7 +322,7 @@ static struct spu_context *
 spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
                       struct file *filp)
 {
-        struct spu_context *tmp, *neighbor;
+        struct spu_context *tmp, *neighbor, *err;
         int count, node;
         int aff_supp;
 
@@ -354,11 +354,15 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
         if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
             !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
             !list_entry(neighbor->aff_list.next, struct spu_context,
-            aff_list)->aff_head)
-                return ERR_PTR(-EEXIST);
+            aff_list)->aff_head) {
+                err = ERR_PTR(-EEXIST);
+                goto out_put_neighbor;
+        }
 
-        if (gang != neighbor->gang)
-                return ERR_PTR(-EINVAL);
+        if (gang != neighbor->gang) {
+                err = ERR_PTR(-EINVAL);
+                goto out_put_neighbor;
+        }
 
         count = 1;
         list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
@@ -372,11 +376,17 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
                                 break;
                 }
 
-                if (node == MAX_NUMNODES)
-                        return ERR_PTR(-EEXIST);
+                if (node == MAX_NUMNODES) {
+                        err = ERR_PTR(-EEXIST);
+                        goto out_put_neighbor;
+                }
         }
 
         return neighbor;
+
+out_put_neighbor:
+        put_spu_context(neighbor);
+        return err;
 }
 
 static void
@@ -454,9 +464,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
         if (ret)
                 goto out_aff_unlock;
 
-        if (affinity)
+        if (affinity) {
                 spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
                                    neighbor);
+                if (neighbor)
+                        put_spu_context(neighbor);
+        }
 
         /*
          * get references for dget and mntget, will be released
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c01a09da1e56..b4814c740d8a 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -410,8 +410,11 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
          * since we have TIF_SINGLESTEP set, thus the kernel will do
          * it upon return from the syscall anyawy
          */
-        if ((status & SPU_STATUS_STOPPED_BY_STOP)
-            && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+        if (unlikely(status & SPU_STATUS_SINGLE_STEP))
+                ret = -ERESTARTSYS;
+
+        else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
+            && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
                 force_sig(SIGTRAP, current);
                 ret = -ERESTARTSYS;
         }
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 00d914232af1..5915343e2599 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -39,6 +39,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/marker.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -216,8 +217,8 @@ void do_notify_spus_active(void)
  */
 static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 {
-        pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
-                 spu->number, spu->node);
+        spu_context_trace(spu_bind_context__enter, ctx, spu);
+
         spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
         if (ctx->flags & SPU_CREATE_NOSCHED)
@@ -399,8 +400,8 @@ static int has_affinity(struct spu_context *ctx)
  */
 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
-        pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
-                 spu->pid, spu->number, spu->node);
+        spu_context_trace(spu_unbind_context__enter, ctx, spu);
+
         spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
         if (spu->ctx->flags & SPU_CREATE_NOSCHED)
@@ -528,6 +529,8 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
         struct spu *spu, *aff_ref_spu;
         int node, n;
 
+        spu_context_nospu_trace(spu_get_idle__enter, ctx);
+
         if (ctx->gang) {
                 mutex_lock(&ctx->gang->aff_mutex);
                 if (has_affinity(ctx)) {
@@ -546,8 +549,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
                         if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
                                 ctx->gang->aff_ref_spu = NULL;
                         mutex_unlock(&ctx->gang->aff_mutex);
-
-                        return NULL;
+                        goto not_found;
                 }
                 mutex_unlock(&ctx->gang->aff_mutex);
         }
@@ -565,12 +567,14 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
                 mutex_unlock(&cbe_spu_info[node].list_mutex);
         }
 
+ not_found:
+        spu_context_nospu_trace(spu_get_idle__not_found, ctx);
         return NULL;
 
  found:
         spu->alloc_state = SPU_USED;
         mutex_unlock(&cbe_spu_info[node].list_mutex);
-        pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+        spu_context_trace(spu_get_idle__found, ctx, spu);
         spu_init_channels(spu);
         return spu;
 }
@@ -587,6 +591,8 @@ static struct spu *find_victim(struct spu_context *ctx)
         struct spu *spu;
         int node, n;
 
+        spu_context_nospu_trace(spu_find_vitim__enter, ctx);
+
         /*
          * Look for a possible preemption candidate on the local node first.
          * If there is no candidate look at the other nodes.  This isn't
@@ -640,6 +646,8 @@ static struct spu *find_victim(struct spu_context *ctx)
                         goto restart;
                 }
 
+                spu_context_trace(__spu_deactivate__unload, ctx, spu);
+
                 mutex_lock(&cbe_spu_info[node].list_mutex);
                 cbe_spu_info[node].nr_active--;
                 spu_unbind_context(spu, victim);
@@ -822,6 +830,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
  */
 void spu_deactivate(struct spu_context *ctx)
 {
+        spu_context_nospu_trace(spu_deactivate__enter, ctx);
         __spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
@@ -835,6 +844,7 @@ void spu_deactivate(struct spu_context *ctx)
  */
 void spu_yield(struct spu_context *ctx)
 {
+        spu_context_nospu_trace(spu_yield__enter, ctx);
         if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
                 mutex_lock(&ctx->state_mutex);
                 __spu_deactivate(ctx, 0, MAX_PRIO);
@@ -864,11 +874,15 @@ static noinline void spusched_tick(struct spu_context *ctx)
                 goto out;
 
         spu = ctx->spu;
+
+        spu_context_trace(spusched_tick__preempt, ctx, spu);
+
         new = grab_runnable_context(ctx->prio + 1, spu->node);
         if (new) {
                 spu_unschedule(spu, ctx);
                 spu_add_to_rq(ctx);
         } else {
+                spu_context_nospu_trace(spusched_tick__newslice, ctx);
                 ctx->time_slice++;
         }
 out:
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0e114038ea6f..795a1b52538b 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -325,4 +325,9 @@ extern void spu_free_lscsa(struct spu_state *csa);
 extern void spuctx_switch_state(struct spu_context *ctx,
                 enum spu_utilization_state new_state);
 
+#define spu_context_trace(name, ctx, spu) \
+        trace_mark(name, "%p %p", ctx, spu);
+#define spu_context_nospu_trace(name, ctx) \
+        trace_mark(name, "%p", ctx);
+
 #endif
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
new file mode 100644
index 000000000000..2b1953f6f12e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2007 IBM Deutschland Entwicklung GmbH
+ * Released under GPL v2.
+ *
+ * Partially based on net/ipv4/tcp_probe.c.
+ *
+ * Simple tracing facility for spu contexts.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/proc_fs.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include "spufs.h"
+
+struct spu_probe {
+        const char *name;
+        const char *format;
+        marker_probe_func *probe_func;
+};
+
+struct sputrace {
+        ktime_t tstamp;
+        int owner_tid; /* owner */
+        int curr_tid;
+        const char *name;
+        int number;
+};
+
+static int bufsize __read_mostly = 16384;
+MODULE_PARM_DESC(bufsize, "Log buffer size (number of records)");
+module_param(bufsize, int, 0);
+
+
+static DEFINE_SPINLOCK(sputrace_lock);
+static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
+static ktime_t sputrace_start;
+static unsigned long sputrace_head, sputrace_tail;
+static struct sputrace *sputrace_log;
+
+static int sputrace_used(void)
+{
+        return (sputrace_head - sputrace_tail) % bufsize;
+}
+
+static inline int sputrace_avail(void)
+{
+        return bufsize - sputrace_used();
+}
+
+static int sputrace_sprint(char *tbuf, int n)
+{
+        const struct sputrace *t = sputrace_log + sputrace_tail % bufsize;
+        struct timespec tv =
+                ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
+
+        return snprintf(tbuf, n,
+                "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+                (unsigned long) tv.tv_sec,
+                (unsigned long) tv.tv_nsec,
+                t->owner_tid,
+                t->name,
+                t->curr_tid,
+                t->number);
+}
+
+static ssize_t sputrace_read(struct file *file, char __user *buf,
+                size_t len, loff_t *ppos)
+{
+        int error = 0, cnt = 0;
+
+        if (!buf || len < 0)
+                return -EINVAL;
+
+        while (cnt < len) {
+                char tbuf[128];
+                int width;
+
+                error = wait_event_interruptible(sputrace_wait,
+                                                 sputrace_used() > 0);
+                if (error)
+                        break;
+
+                spin_lock(&sputrace_lock);
+                if (sputrace_head == sputrace_tail) {
+                        spin_unlock(&sputrace_lock);
+                        continue;
+                }
+
+                width = sputrace_sprint(tbuf, sizeof(tbuf));
+                if (width < len)
+                        sputrace_tail = (sputrace_tail + 1) % bufsize;
+                spin_unlock(&sputrace_lock);
+
+                if (width >= len)
+                        break;
+
+                error = copy_to_user(buf + cnt, tbuf, width);
+                if (error)
+                        break;
+                cnt += width;
+        }
+
+        return cnt == 0 ? error : cnt;
+}
+
+static int sputrace_open(struct inode *inode, struct file *file)
+{
+        spin_lock(&sputrace_lock);
+        sputrace_head = sputrace_tail = 0;
+        sputrace_start = ktime_get();
+        spin_unlock(&sputrace_lock);
+
+        return 0;
+}
+
+static const struct file_operations sputrace_fops = {
+        .owner = THIS_MODULE,
+        .open  = sputrace_open,
+        .read  = sputrace_read,
+};
+
+static void sputrace_log_item(const char *name, struct spu_context *ctx,
+                struct spu *spu)
+{
+        spin_lock(&sputrace_lock);
+        if (sputrace_avail() > 1) {
+                struct sputrace *t = sputrace_log + sputrace_head;
+
+                t->tstamp = ktime_get();
+                t->owner_tid = ctx->tid;
+                t->name = name;
+                t->curr_tid = current->pid;
+                t->number = spu ? spu->number : -1;
+
+                sputrace_head = (sputrace_head + 1) % bufsize;
+        } else {
+                printk(KERN_WARNING
+                       "sputrace: lost samples due to full buffer.\n");
+        }
+        spin_unlock(&sputrace_lock);
+
+        wake_up(&sputrace_wait);
+}
+
+static void spu_context_event(const struct marker *mdata,
+                void *private, const char *format, ...)
+{
+        struct spu_probe *p = mdata->private;
+        va_list ap;
+        struct spu_context *ctx;
+        struct spu *spu;
+
+        va_start(ap, format);
+        ctx = va_arg(ap, struct spu_context *);
+        spu = va_arg(ap, struct spu *);
+
+        sputrace_log_item(p->name, ctx, spu);
+        va_end(ap);
+}
+
+static void spu_context_nospu_event(const struct marker *mdata,
+                void *private, const char *format, ...)
+{
+        struct spu_probe *p = mdata->private;
+        va_list ap;
+        struct spu_context *ctx;
+
+        va_start(ap, format);
+        ctx = va_arg(ap, struct spu_context *);
+
+        sputrace_log_item(p->name, ctx, NULL);
+        va_end(ap);
+}
+
+struct spu_probe spu_probes[] = {
+        { "spu_bind_context__enter", "%p %p", spu_context_event },
+        { "spu_unbind_context__enter", "%p %p", spu_context_event },
+        { "spu_get_idle__enter", "%p", spu_context_nospu_event },
+        { "spu_get_idle__found", "%p %p", spu_context_event },
+        { "spu_get_idle__not_found", "%p", spu_context_nospu_event },
+        { "spu_find_victim__enter", "%p", spu_context_nospu_event },
+        { "spusched_tick__preempt", "%p %p", spu_context_event },
+        { "spusched_tick__newslice", "%p", spu_context_nospu_event },
+        { "spu_yield__enter", "%p", spu_context_nospu_event },
+        { "spu_deactivate__enter", "%p", spu_context_nospu_event },
+        { "__spu_deactivate__unload", "%p %p", spu_context_event },
+        { "spufs_ps_nopfn__enter", "%p", spu_context_nospu_event },
+        { "spufs_ps_nopfn__sleep", "%p", spu_context_nospu_event },
+        { "spufs_ps_nopfn__wake", "%p %p", spu_context_event },
+        { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
+        { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
+        { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+};
+
+static int __init sputrace_init(void)
+{
+        struct proc_dir_entry *entry;
+        int i, error = -ENOMEM;
+
+        sputrace_log = kcalloc(sizeof(struct sputrace),
+                               bufsize, GFP_KERNEL);
+        if (!sputrace_log)
+                goto out;
+
+        entry = create_proc_entry("sputrace", S_IRUSR, NULL);
+        if (!entry)
+                goto out_free_log;
+        entry->proc_fops = &sputrace_fops;
+
+        for (i = 0; i < ARRAY_SIZE(spu_probes); i++) {
+                struct spu_probe *p = &spu_probes[i];
+
+                error = marker_probe_register(p->name, p->format,
+                                              p->probe_func, p);
+                if (error)
+                        printk(KERN_INFO "Unable to register probe %s\n",
+                               p->name);
+
+                error = marker_arm(p->name);
+                if (error)
+                        printk(KERN_INFO "Unable to arm probe %s\n", p->name);
+        }
+
+        return 0;
+
+out_free_log:
+        kfree(sputrace_log);
+out:
+        return -ENOMEM;
+}
+
+static void __exit sputrace_exit(void)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(spu_probes); i++)
+                marker_probe_unregister(spu_probes[i].name);
+
+        remove_proc_entry("sputrace", NULL);
+        kfree(sputrace_log);
+}
+
+module_init(sputrace_init);
+module_exit(sputrace_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 6a0c6f6675cd..11fa3c772ed5 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -199,7 +199,7 @@ static struct iommu_table vio_iommu_table;
 
 void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
 {
-        return iommu_alloc_coherent(&vio_iommu_table, size, dma_handle,
+        return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
                                 DMA_32BIT_MASK, flag, -1);
 }
 EXPORT_SYMBOL_GPL(iseries_hv_alloc);
@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(iseries_hv_free);
 dma_addr_t iseries_hv_map(void *vaddr, size_t size,
                         enum dma_data_direction direction)
 {
-        return iommu_map_single(&vio_iommu_table, vaddr, size,
+        return iommu_map_single(NULL, &vio_iommu_table, vaddr, size,
                                 DMA_32BIT_MASK, direction);
 }
 
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index c02f8742c54d..2800fced8c7c 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -167,6 +167,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
 
         if ((child = of_get_next_child(np, NULL))) {
                 of_node_put(child);
+                of_node_put(parent);
                 return -EBUSY;
         }
 
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 0e74a4bd9827..5d2d5522ef41 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -174,15 +174,19 @@ int mpc8xx_pic_init(void)
                 goto out;
 
         siu_reg = ioremap(res.start, res.end - res.start + 1);
-        if (siu_reg == NULL)
-                return -EINVAL;
+        if (siu_reg == NULL) {
+                ret = -EINVAL;
+                goto out;
+        }
 
-        mpc8xx_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
+        mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
                                          64, &mpc8xx_pic_host_ops, 64);
         if (mpc8xx_pic_host == NULL) {
                 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
                 ret = -ENOMEM;
+                goto out;
         }
+        return 0;
 
 out:
         of_node_put(np);