author		Linus Torvalds <torvalds@linux-foundation.org>	2008-04-21 18:50:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-21 18:50:49 -0400
commit		9a64388d83f6ef08dfff405a9d122e3dbcb6bf38 (patch)
tree		a77532ce4d6d56be6c6c7f405cd901a0184250fb /arch/powerpc/platforms/cell
parent		e80ab411e589e00550e2e6e5a6a02d59cc730357 (diff)
parent		14b3ca4022f050f8622ed282b734ddf445464583 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (202 commits)
[POWERPC] Fix compile breakage for 64-bit UP configs
[POWERPC] Define copy_siginfo_from_user32
[POWERPC] Add compat handler for PTRACE_GETSIGINFO
[POWERPC] i2c: Fix build breakage introduced by OF helpers
[POWERPC] Optimize fls64() on 64-bit processors
[POWERPC] irqtrace support for 64-bit powerpc
[POWERPC] Stacktrace support for lockdep
[POWERPC] Move stackframe definitions to common header
[POWERPC] Fix device-tree locking vs. interrupts
[POWERPC] Make pci_bus_to_host()'s struct pci_bus * argument const
[POWERPC] Remove unused __max_memory variable
[POWERPC] Simplify xics direct/lpar irq_host setup
[POWERPC] Use pseries_setup_i8259_cascade() in pseries_mpic_init_IRQ()
[POWERPC] Turn xics_setup_8259_cascade() into a generic pseries_setup_i8259_cascade()
[POWERPC] Move xics_setup_8259_cascade() into platforms/pseries/setup.c
[POWERPC] Use asm-generic/bitops/find.h in bitops.h
[POWERPC] 83xx: mpc8315 - fix USB UTMI Host setup
[POWERPC] 85xx: Fix the size of qe muram for MPC8568E
[POWERPC] 86xx: mpc86xx_hpcn - Temporarily accept old dts node identifier.
[POWERPC] 86xx: mark functions static, other minor cleanups
...
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--	arch/powerpc/platforms/cell/iommu.c	10
-rw-r--r--	arch/powerpc/platforms/cell/pervasive.c	2
-rw-r--r--	arch/powerpc/platforms/cell/ras.c	10
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	8
-rw-r--r--	arch/powerpc/platforms/cell/spu_callbacks.c	2
-rw-r--r--	arch/powerpc/platforms/cell/spu_manage.c	8
-rw-r--r--	arch/powerpc/platforms/cell/spufs/coredump.c	8
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c	8
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c	14
-rw-r--r--	arch/powerpc/platforms/cell/spufs/switch.c	2
10 files changed, 40 insertions, 32 deletions
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index d75ccded7f10..45646b2b4af4 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -28,13 +28,13 @@
 #include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/lmb.h>
 
 #include <asm/prom.h>
 #include <asm/iommu.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/udbg.h>
-#include <asm/lmb.h>
 #include <asm/firmware.h>
 #include <asm/cell-regs.h>
 
@@ -316,7 +316,7 @@ static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
 	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
 
 	pr_debug("%s: iommu[%d]: segments: %lu\n",
-			__FUNCTION__, iommu->nid, segments);
+			__func__, iommu->nid, segments);
 
 	/* set up the segment table */
 	stab_size = segments * sizeof(unsigned long);
@@ -343,7 +343,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
 			(1 << 12) / sizeof(unsigned long));
 
 	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
-	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
+	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
 			iommu->nid, ptab_size, get_order(ptab_size));
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
 	BUG_ON(!page);
@@ -355,7 +355,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
 	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
 
 	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
-			__FUNCTION__, iommu->nid, iommu->stab, ptab,
+			__func__, iommu->nid, iommu->stab, ptab,
 			n_pte_pages);
 
 	/* initialise the STEs */
@@ -394,7 +394,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
 
 	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
 		panic("%s: missing IOC register mappings for node %d\n",
-		      __FUNCTION__, iommu->nid);
+		      __func__, iommu->nid);
 
 	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
 	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 0304589c0a80..8a3631ce912b 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -65,7 +65,7 @@ static void cbe_power_save(void)
 		break;
 	default:
 		printk(KERN_WARNING "%s: unknown configuration\n",
-			__FUNCTION__);
+			__func__);
 		break;
 	}
 	mtspr(SPRN_TSC_CELL, thread_switch_control);
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index e43024c0392e..655704ad03cf 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -132,7 +132,7 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
 			(unsigned int)(addr >> 32),
 			(unsigned int)(addr & 0xffffffff))) {
 		printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n",
-				__FUNCTION__, nid);
+				__func__, nid);
 		goto out_free_pages;
 	}
 
@@ -162,7 +162,7 @@ static int __init cbe_ptcal_enable(void)
 	if (!size)
 		return -ENODEV;
 
-	pr_debug("%s: enabling PTCAL, size = 0x%x\n", __FUNCTION__, *size);
+	pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size);
 	order = get_order(*size);
 	of_node_put(np);
 
@@ -180,7 +180,7 @@ static int __init cbe_ptcal_enable(void)
 		const u32 *nid = of_get_property(np, "node-id", NULL);
 		if (!nid) {
 			printk(KERN_ERR "%s: node %s is missing node-id?\n",
-					__FUNCTION__, np->full_name);
+					__func__, np->full_name);
 			continue;
 		}
 		cbe_ptcal_enable_on_node(*nid, order);
@@ -195,13 +195,13 @@ static int cbe_ptcal_disable(void)
 	struct ptcal_area *area, *tmp;
 	int ret = 0;
 
-	pr_debug("%s: disabling PTCAL\n", __FUNCTION__);
+	pr_debug("%s: disabling PTCAL\n", __func__);
 
 	list_for_each_entry_safe(area, tmp, &ptcal_list, list) {
 		/* disable ptcal on this node */
 		if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) {
 			printk(KERN_ERR "%s: error disabling PTCAL "
-				"on node %d!\n", __FUNCTION__,
+				"on node %d!\n", __func__,
 				area->nid);
 			ret = -EIO;
 			continue;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 712001f6b7da..6bab44b7716b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -165,7 +165,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 	struct spu_slb slb;
 	int psize;
 
-	pr_debug("%s\n", __FUNCTION__);
+	pr_debug("%s\n", __func__);
 
 	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
 
@@ -215,7 +215,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
-	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);
+	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
 
 	/* Handle kernel space hash faults immediately.
 	   User hash faults need to be deferred to process context. */
@@ -351,7 +351,7 @@ spu_irq_class_1(int irq, void *data)
 		__spu_trap_data_seg(spu, dar);
 
 	spin_unlock(&spu->register_lock);
-	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
 		dar, dsisr);
 
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
@@ -726,7 +726,7 @@ static int __init init_spu_base(void)
 
 	if (ret < 0) {
 		printk(KERN_WARNING "%s: Error initializing spus\n",
-			__FUNCTION__);
+			__func__);
 		goto out_unregister_sysdev_class;
 	}
 
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index dceb8b6a9382..19f6bfdbb933 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -54,7 +54,7 @@ long spu_sys_callback(struct spu_syscall_block *s)
 	long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
 
 	if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
-		pr_debug("%s: invalid syscall #%ld", __FUNCTION__, s->nr_ret);
+		pr_debug("%s: invalid syscall #%ld", __func__, s->nr_ret);
 		return -ENOSYS;
 	}
 
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index d351bdebf5f1..4c506c1463cd 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -92,7 +92,7 @@ static int __init spu_map_interrupts_old(struct spu *spu,
 
 	tmp = of_get_property(np->parent->parent, "node-id", NULL);
 	if (!tmp) {
-		printk(KERN_WARNING "%s: can't find node-id\n", __FUNCTION__);
+		printk(KERN_WARNING "%s: can't find node-id\n", __func__);
 		nid = spu->node;
 	} else
 		nid = tmp[0];
@@ -296,7 +296,7 @@ static int __init of_enumerate_spus(int (*fn)(void *data))
 		ret = fn(node);
 		if (ret) {
 			printk(KERN_WARNING "%s: Error initializing %s\n",
-				__FUNCTION__, node->name);
+				__func__, node->name);
 			break;
 		}
 		n++;
@@ -327,7 +327,7 @@ static int __init of_create_spu(struct spu *spu, void *data)
 		if (!legacy_map) {
 			legacy_map = 1;
 			printk(KERN_WARNING "%s: Legacy device tree found, "
-				"trying to map old style\n", __FUNCTION__);
+				"trying to map old style\n", __func__);
 		}
 		ret = spu_map_device_old(spu);
 		if (ret) {
@@ -342,7 +342,7 @@ static int __init of_create_spu(struct spu *spu, void *data)
 		if (!legacy_irq) {
 			legacy_irq = 1;
 			printk(KERN_WARNING "%s: Legacy device tree found, "
-				"trying old style irq\n", __FUNCTION__);
+				"trying old style irq\n", __func__);
 		}
 		ret = spu_map_interrupts_old(spu, spe);
 		if (ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 0c6a96b82b2d..b962c3ab470c 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -133,8 +133,6 @@ static struct spu_context *coredump_next_context(int *fd)
 		if (ctx->flags & SPU_CREATE_NOSCHED)
 			continue;
 
-		/* start searching the next fd next time we're called */
-		(*fd)++;
 		break;
 	}
 
@@ -157,6 +155,9 @@ int spufs_coredump_extra_notes_size(void)
 			break;
 
 		size += rc;
+
+		/* start searching the next fd next time */
+		fd++;
 	}
 
 	return size;
@@ -239,6 +240,9 @@ int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
 		}
 
 		spu_release_saved(ctx);
+
+		/* start searching the next fd next time */
+		fd++;
 	}
 
 	return 0;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c7df0a6cfa1b..08f44d1971ac 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1336,7 +1336,7 @@ static u64 spufs_signal1_type_get(struct spu_context *ctx)
 	return ctx->ops->signal1_type_get(ctx);
 }
 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
-		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
+		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
 
 
 static int spufs_signal2_type_set(void *data, u64 val)
@@ -1358,7 +1358,7 @@ static u64 spufs_signal2_type_get(struct spu_context *ctx)
 	return ctx->ops->signal2_type_get(ctx);
 }
 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
-		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
+		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
 
 #if SPUFS_MMAP_4K
 static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
@@ -1555,7 +1555,7 @@ void spufs_mfc_callback(struct spu *spu)
 
 	wake_up_all(&ctx->mfc_wq);
 
-	pr_debug("%s %s\n", __FUNCTION__, spu->name);
+	pr_debug("%s %s\n", __func__, spu->name);
 	if (ctx->mfc_fasync) {
 		u32 free_elements, tagstatus;
 		unsigned int mask;
@@ -1789,7 +1789,7 @@ static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
 	if (tagstatus & ctx->tagwait)
 		mask |= POLLIN | POLLRDNORM;
 
-	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
+	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
 		free_elements, tagstatus, ctx->tagwait);
 
 	return mask;
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index cac69e116776..96bf7c2b86fc 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -98,7 +98,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
 		if (time_after(jiffies, timeout)) {
 			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
-					__FUNCTION__);
+					__func__);
 			ret = -EIO;
 			goto out;
 		}
@@ -124,7 +124,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 			status_loading) {
 		if (time_after(jiffies, timeout)) {
 			printk(KERN_ERR "%s: timeout waiting for loader\n",
-					__FUNCTION__);
+					__func__);
 			ret = -EIO;
 			goto out_drop_priv;
 		}
@@ -134,7 +134,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	if (!(status & SPU_STATUS_RUNNING)) {
 		/* If isolated LOAD has failed: run SPU, we will get a stop-and
 		 * signal later. */
-		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
+		pr_debug("%s: isolated LOAD failed\n", __func__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 		ret = -EACCES;
 		goto out_drop_priv;
@@ -142,7 +142,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 
 	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
 		/* This isn't allowed by the CBEA, but check anyway */
-		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
+		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
 		ret = -EINVAL;
 		goto out_drop_priv;
@@ -282,7 +282,7 @@ static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
 		break;
 	default:
 		printk(KERN_WARNING "%s: unexpected return code %ld\n",
-			__FUNCTION__, *spu_ret);
+			__func__, *spu_ret);
 		ret = 0;
 	}
 	return ret;
@@ -323,6 +323,10 @@ static int spu_process_callback(struct spu_context *ctx)
 		return -EINTR;
 	}
 
+	/* need to re-get the ls, as it may have changed when we released the
+	 * spu */
+	ls = (void __iomem *)ctx->ops->get_ls(ctx);
+
 	/* write result, jump over indirect pointer */
 	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
 	ctx->ops->npc_write(ctx, npc);
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index e9dc7a55d1b9..d2a1249d36dd 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1815,6 +1815,7 @@ static void save_csa(struct spu_state *prev, struct spu *spu)
 	save_mfc_csr_ato(prev, spu);	/* Step 24. */
 	save_mfc_tclass_id(prev, spu);	/* Step 25. */
 	set_mfc_tclass_id(prev, spu);	/* Step 26. */
+	save_mfc_cmd(prev, spu);	/* Step 26a - moved from 44. */
 	purge_mfc_queue(prev, spu);	/* Step 27. */
 	wait_purge_complete(prev, spu);	/* Step 28. */
 	setup_mfc_sr1(prev, spu);	/* Step 30. */
@@ -1831,7 +1832,6 @@ static void save_csa(struct spu_state *prev, struct spu *spu)
 	save_ppuint_mb(prev, spu);	/* Step 41. */
 	save_ch_part1(prev, spu);	/* Step 42. */
 	save_spu_mb(prev, spu);	/* Step 43. */
-	save_mfc_cmd(prev, spu);	/* Step 44. */
 	reset_ch(prev, spu);	/* Step 45. */
 }
 