Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--  arch/powerpc/platforms/cell/Makefile                 |    4
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c               |   35
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c            |    2
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c  |    2
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c        |    2
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c               |    3
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.h               |  271
-rw-r--r--  arch/powerpc/platforms/cell/cbe_thermal.c            |    2
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c              |    4
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c                  |    2
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c              |    2
-rw-r--r--  arch/powerpc/platforms/cell/pmu.c                    |    2
-rw-r--r--  arch/powerpc/platforms/cell/ras.c                    |    2
-rw-r--r--  arch/powerpc/platforms/cell/setup.c                  |   17
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c             |   22
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c               |    8
-rw-r--r--  arch/powerpc/platforms/cell/spu_callbacks.c          |    4
-rw-r--r--  arch/powerpc/platforms/cell/spu_coredump.c           |   79
-rw-r--r--  arch/powerpc/platforms/cell/spu_manage.c             |    4
-rw-r--r--  arch/powerpc/platforms/cell/spu_syscalls.c           |  142
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c         |  236
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c             |  251
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c            |   15
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c              |    4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c            |   49
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h             |    7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c           |   31
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c         |   48
28 files changed, 421 insertions(+), 829 deletions(-)
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index f88a7c76f29..61d12f18303 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -13,15 +13,13 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
13endif 13endif
14 14
15# needed only when building loadable spufs.ko 15# needed only when building loadable spufs.ko
16spufs-modular-$(CONFIG_SPU_FS) += spu_syscalls.o
17spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o 16spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o
18 17
19spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o 18spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o
20spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o 19spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o
21 20
22obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ 21obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
23 spu_coredump.o \ 22 spu_syscalls.o \
24 $(spufs-modular-m) \
25 $(spu-priv1-y) \ 23 $(spu-priv1-y) \
26 $(spu-manage-y) \ 24 $(spu-manage-y) \
27 spufs/ 25 spufs/
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 4c9ab5b70ba..1245b2f517b 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -64,13 +64,11 @@
64 64
65 65
66struct axon_msic { 66struct axon_msic {
67 struct device_node *dn;
68 struct irq_host *irq_host; 67 struct irq_host *irq_host;
69 __le32 *fifo; 68 __le32 *fifo;
70 dcr_host_t dcr_host; 69 dcr_host_t dcr_host;
71 struct list_head list; 70 struct list_head list;
72 u32 read_offset; 71 u32 read_offset;
73 u32 dcr_base;
74}; 72};
75 73
76static LIST_HEAD(axon_msic_list); 74static LIST_HEAD(axon_msic_list);
@@ -79,12 +77,12 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
79{ 77{
80 pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); 78 pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
81 79
82 dcr_write(msic->dcr_host, msic->dcr_base + dcr_n, val); 80 dcr_write(msic->dcr_host, msic->dcr_host.base + dcr_n, val);
83} 81}
84 82
85static u32 msic_dcr_read(struct axon_msic *msic, unsigned int dcr_n) 83static u32 msic_dcr_read(struct axon_msic *msic, unsigned int dcr_n)
86{ 84{
87 return dcr_read(msic->dcr_host, msic->dcr_base + dcr_n); 85 return dcr_read(msic->dcr_host, msic->dcr_host.base + dcr_n);
88} 86}
89 87
90static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) 88static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
@@ -126,7 +124,7 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
126 const phandle *ph; 124 const phandle *ph;
127 struct axon_msic *msic = NULL; 125 struct axon_msic *msic = NULL;
128 126
129 dn = pci_device_to_OF_node(dev); 127 dn = of_node_get(pci_device_to_OF_node(dev));
130 if (!dn) { 128 if (!dn) {
131 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); 129 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
132 return NULL; 130 return NULL;
@@ -183,7 +181,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
183 int len; 181 int len;
184 const u32 *prop; 182 const u32 *prop;
185 183
186 dn = pci_device_to_OF_node(dev); 184 dn = of_node_get(pci_device_to_OF_node(dev));
187 if (!dn) { 185 if (!dn) {
188 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); 186 dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
189 return -ENODEV; 187 return -ENODEV;
@@ -295,15 +293,7 @@ static int msic_host_map(struct irq_host *h, unsigned int virq,
295 return 0; 293 return 0;
296} 294}
297 295
298static int msic_host_match(struct irq_host *host, struct device_node *dn)
299{
300 struct axon_msic *msic = host->host_data;
301
302 return msic->dn == dn;
303}
304
305static struct irq_host_ops msic_host_ops = { 296static struct irq_host_ops msic_host_ops = {
306 .match = msic_host_match,
307 .map = msic_host_map, 297 .map = msic_host_map,
308}; 298};
309 299
@@ -314,7 +304,8 @@ static int axon_msi_notify_reboot(struct notifier_block *nb,
314 u32 tmp; 304 u32 tmp;
315 305
316 list_for_each_entry(msic, &axon_msic_list, list) { 306 list_for_each_entry(msic, &axon_msic_list, list) {
317 pr_debug("axon_msi: disabling %s\n", msic->dn->full_name); 307 pr_debug("axon_msi: disabling %s\n",
308 msic->irq_host->of_node->full_name);
318 tmp = msic_dcr_read(msic, MSIC_CTRL_REG); 309 tmp = msic_dcr_read(msic, MSIC_CTRL_REG);
319 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; 310 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
320 msic_dcr_write(msic, MSIC_CTRL_REG, tmp); 311 msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
@@ -332,7 +323,7 @@ static int axon_msi_setup_one(struct device_node *dn)
332 struct page *page; 323 struct page *page;
333 struct axon_msic *msic; 324 struct axon_msic *msic;
334 unsigned int virq; 325 unsigned int virq;
335 int dcr_len; 326 int dcr_base, dcr_len;
336 327
337 pr_debug("axon_msi: setting up dn %s\n", dn->full_name); 328 pr_debug("axon_msi: setting up dn %s\n", dn->full_name);
338 329
@@ -343,17 +334,17 @@ static int axon_msi_setup_one(struct device_node *dn)
343 goto out; 334 goto out;
344 } 335 }
345 336
346 msic->dcr_base = dcr_resource_start(dn, 0); 337 dcr_base = dcr_resource_start(dn, 0);
347 dcr_len = dcr_resource_len(dn, 0); 338 dcr_len = dcr_resource_len(dn, 0);
348 339
349 if (msic->dcr_base == 0 || dcr_len == 0) { 340 if (dcr_base == 0 || dcr_len == 0) {
350 printk(KERN_ERR 341 printk(KERN_ERR
351 "axon_msi: couldn't parse dcr properties on %s\n", 342 "axon_msi: couldn't parse dcr properties on %s\n",
352 dn->full_name); 343 dn->full_name);
353 goto out; 344 goto out;
354 } 345 }
355 346
356 msic->dcr_host = dcr_map(dn, msic->dcr_base, dcr_len); 347 msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
357 if (!DCR_MAP_OK(msic->dcr_host)) { 348 if (!DCR_MAP_OK(msic->dcr_host)) {
358 printk(KERN_ERR "axon_msi: dcr_map failed for %s\n", 349 printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
359 dn->full_name); 350 dn->full_name);
@@ -370,8 +361,8 @@ static int axon_msi_setup_one(struct device_node *dn)
370 361
371 msic->fifo = page_address(page); 362 msic->fifo = page_address(page);
372 363
373 msic->irq_host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, NR_IRQS, 364 msic->irq_host = irq_alloc_host(of_node_get(dn), IRQ_HOST_MAP_NOMAP,
374 &msic_host_ops, 0); 365 NR_IRQS, &msic_host_ops, 0);
375 if (!msic->irq_host) { 366 if (!msic->irq_host) {
376 printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n", 367 printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
377 dn->full_name); 368 dn->full_name);
@@ -387,8 +378,6 @@ static int axon_msi_setup_one(struct device_node *dn)
387 goto out_free_host; 378 goto out_free_host;
388 } 379 }
389 380
390 msic->dn = of_node_get(dn);
391
392 set_irq_data(virq, msic); 381 set_irq_data(virq, msic);
393 set_irq_chained_handler(virq, axon_msi_cascade); 382 set_irq_chained_handler(virq, axon_msi_cascade);
394 pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq); 383 pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);
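
The axon_msi change (and the spider-pic change further down) applies one idea: hand the device node to irq_alloc_host() so the generic irq_host owns the reference, which makes the driver-private of_node copy and the custom .match callback redundant. A minimal sketch of the resulting shape -- my_pic_probe, my_host_ops and NR_MY_SOURCES are illustrative names only, not part of this patch; the irq_alloc_host() signature is the one used in the hunks above:

static void __init my_pic_probe(struct device_node *dn)
{
	struct irq_host *host;

	/* the irq_host now owns the node reference ... */
	host = irq_alloc_host(of_node_get(dn), IRQ_HOST_MAP_LINEAR,
			      NR_MY_SOURCES, &my_host_ops, 0);
	if (host == NULL)
		return;

	/*
	 * ... so generic code can match controllers against
	 * host->of_node, and the driver no longer needs its own
	 * device_node field or a .match callback.
	 */
	pr_debug("registered PIC for %s\n", host->of_node->full_name);
}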
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index 0b6e8ee85ab..901236fa0f0 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -24,7 +24,7 @@
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/of_platform.h> 25#include <asm/of_platform.h>
26#include <asm/prom.h> 26#include <asm/prom.h>
27#include "cbe_regs.h" 27#include <asm/cell-regs.h>
28#include "cbe_cpufreq.h" 28#include "cbe_cpufreq.h"
29 29
30static DEFINE_MUTEX(cbe_switch_mutex); 30static DEFINE_MUTEX(cbe_switch_mutex);
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
index 163263b3e1c..70fa7aef5ed 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
@@ -28,8 +28,8 @@
28#include <linux/time.h> 28#include <linux/time.h>
29#include <asm/machdep.h> 29#include <asm/machdep.h>
30#include <asm/hw_irq.h> 30#include <asm/hw_irq.h>
31#include <asm/cell-regs.h>
31 32
32#include "cbe_regs.h"
33#include "cbe_cpufreq.h" 33#include "cbe_cpufreq.h"
34 34
35/* to write to MIC register */ 35/* to write to MIC register */
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
index fc6f38982ff..6a2c1b0a9a9 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
@@ -27,12 +27,12 @@
27#include <asm/processor.h> 27#include <asm/processor.h>
28#include <asm/prom.h> 28#include <asm/prom.h>
29#include <asm/pmi.h> 29#include <asm/pmi.h>
30#include <asm/cell-regs.h>
30 31
31#ifdef DEBUG 32#ifdef DEBUG
32#include <asm/time.h> 33#include <asm/time.h>
33#endif 34#endif
34 35
35#include "cbe_regs.h"
36#include "cbe_cpufreq.h" 36#include "cbe_cpufreq.h"
37 37
38static u8 pmi_slow_mode_limit[MAX_CBE]; 38static u8 pmi_slow_mode_limit[MAX_CBE];
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index c8f7f000742..16a9b07e7b0 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -16,8 +16,7 @@
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
17#include <asm/of_device.h> 17#include <asm/of_device.h>
18#include <asm/of_platform.h> 18#include <asm/of_platform.h>
19 19#include <asm/cell-regs.h>
20#include "cbe_regs.h"
21 20
22/* 21/*
23 * Current implementation uses "cpu" nodes. We build our own mapping 22 * Current implementation uses "cpu" nodes. We build our own mapping
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
deleted file mode 100644
index b24025f2ac7..00000000000
--- a/arch/powerpc/platforms/cell/cbe_regs.h
+++ /dev/null
@@ -1,271 +0,0 @@
1/*
2 * cbe_regs.h
3 *
4 * This file is intended to hold the various register definitions for CBE
5 * on-chip system devices (memory controller, IO controller, etc...)
6 *
7 * (C) Copyright IBM Corporation 2001,2006
8 *
9 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
10 * David J. Erb (djerb@us.ibm.com)
11 *
12 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
13 */
14
15#ifndef CBE_REGS_H
16#define CBE_REGS_H
17
18#include <asm/cell-pmu.h>
19
20/*
21 *
22 * Some HID register definitions
23 *
24 */
25
26/* CBE specific HID0 bits */
27#define HID0_CBE_THERM_WAKEUP 0x0000020000000000ul
28#define HID0_CBE_SYSERR_WAKEUP 0x0000008000000000ul
29#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul
30#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
31
32#define MAX_CBE 2
33
34/*
35 *
36 * Pervasive unit register definitions
37 *
38 */
39
40union spe_reg {
41 u64 val;
42 u8 spe[8];
43};
44
45union ppe_spe_reg {
46 u64 val;
47 struct {
48 u32 ppe;
49 u32 spe;
50 };
51};
52
53
54struct cbe_pmd_regs {
55 /* Debug Bus Control */
56 u64 pad_0x0000; /* 0x0000 */
57
58 u64 group_control; /* 0x0008 */
59
60 u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */
61
62 u64 debug_bus_control; /* 0x00a8 */
63
64 u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */
65
66 u64 trace_aux_data; /* 0x0100 */
67 u64 trace_buffer_0_63; /* 0x0108 */
68 u64 trace_buffer_64_127; /* 0x0110 */
69 u64 trace_address; /* 0x0118 */
70 u64 ext_tr_timer; /* 0x0120 */
71
72 u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */
73
74 /* Performance Monitor */
75 u64 pm_status; /* 0x0400 */
76 u64 pm_control; /* 0x0408 */
77 u64 pm_interval; /* 0x0410 */
78 u64 pm_ctr[4]; /* 0x0418 */
79 u64 pm_start_stop; /* 0x0438 */
80 u64 pm07_control[8]; /* 0x0440 */
81
82 u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */
83
84 /* Thermal Sensor Registers */
85 union spe_reg ts_ctsr1; /* 0x0800 */
86 u64 ts_ctsr2; /* 0x0808 */
87 union spe_reg ts_mtsr1; /* 0x0810 */
88 u64 ts_mtsr2; /* 0x0818 */
89 union spe_reg ts_itr1; /* 0x0820 */
90 u64 ts_itr2; /* 0x0828 */
91 u64 ts_gitr; /* 0x0830 */
92 u64 ts_isr; /* 0x0838 */
93 u64 ts_imr; /* 0x0840 */
94 union spe_reg tm_cr1; /* 0x0848 */
95 u64 tm_cr2; /* 0x0850 */
96 u64 tm_simr; /* 0x0858 */
97 union ppe_spe_reg tm_tpr; /* 0x0860 */
98 union spe_reg tm_str1; /* 0x0868 */
99 u64 tm_str2; /* 0x0870 */
100 union ppe_spe_reg tm_tsr; /* 0x0878 */
101
102 /* Power Management */
103 u64 pmcr; /* 0x0880 */
104#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
105 u64 pmsr; /* 0x0888 */
106
107 /* Time Base Register */
108 u64 tbr; /* 0x0890 */
109
110 u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
111
112 /* Fault Isolation Registers */
113 u64 checkstop_fir; /* 0x0c00 */
114 u64 recoverable_fir; /* 0x0c08 */
115 u64 spec_att_mchk_fir; /* 0x0c10 */
116 u32 fir_mode_reg; /* 0x0c18 */
117 u8 pad_0x0c1c_0x0c20 [4]; /* 0x0c1c */
118#define CBE_PMD_FIR_MODE_M8 0x00800
119 u64 fir_enable_mask; /* 0x0c20 */
120
121 u8 pad_0x0c28_0x0ca8 [0x0ca8 - 0x0c28]; /* 0x0c28 */
122 u64 ras_esc_0; /* 0x0ca8 */
123 u8 pad_0x0cb0_0x1000 [0x1000 - 0x0cb0]; /* 0x0cb0 */
124};
125
126extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
127extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
128
129/*
130 * PMU shadow registers
131 *
132 * Many of the registers in the performance monitoring unit are write-only,
133 * so we need to save a copy of what we write to those registers.
134 *
135 * The actual data counters are read/write. However, writing to the counters
136 * only takes effect if the PMU is enabled. Otherwise the value is stored in
137 * a hardware latch until the next time the PMU is enabled. So we save a copy
138 * of the counter values if we need to read them back while the PMU is
139 * disabled. The counter_value_in_latch field is a bitmap indicating which
140 * counters currently have a value waiting to be written.
141 */
142
143struct cbe_pmd_shadow_regs {
144 u32 group_control;
145 u32 debug_bus_control;
146 u32 trace_address;
147 u32 ext_tr_timer;
148 u32 pm_status;
149 u32 pm_control;
150 u32 pm_interval;
151 u32 pm_start_stop;
152 u32 pm07_control[NR_CTRS];
153
154 u32 pm_ctr[NR_PHYS_CTRS];
155 u32 counter_value_in_latch;
156};
157
158extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
159extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
160
161/*
162 *
163 * IIC unit register definitions
164 *
165 */
166
167struct cbe_iic_pending_bits {
168 u32 data;
169 u8 flags;
170 u8 class;
171 u8 source;
172 u8 prio;
173};
174
175#define CBE_IIC_IRQ_VALID 0x80
176#define CBE_IIC_IRQ_IPI 0x40
177
178struct cbe_iic_thread_regs {
179 struct cbe_iic_pending_bits pending;
180 struct cbe_iic_pending_bits pending_destr;
181 u64 generate;
182 u64 prio;
183};
184
185struct cbe_iic_regs {
186 u8 pad_0x0000_0x0400[0x0400 - 0x0000]; /* 0x0000 */
187
188 /* IIC interrupt registers */
189 struct cbe_iic_thread_regs thread[2]; /* 0x0400 */
190
191 u64 iic_ir; /* 0x0440 */
192#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12)
193#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
194#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
195#define CBE_IIC_IR_IOC_0 0x0
196#define CBE_IIC_IR_IOC_1S 0xb
197#define CBE_IIC_IR_PT_0 0xe
198#define CBE_IIC_IR_PT_1 0xf
199
200 u64 iic_is; /* 0x0448 */
201#define CBE_IIC_IS_PMI 0x2
202
203 u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */
204
205 /* IOC FIR */
206 u64 ioc_fir_reset; /* 0x0500 */
207 u64 ioc_fir_set; /* 0x0508 */
208 u64 ioc_checkstop_enable; /* 0x0510 */
209 u64 ioc_fir_error_mask; /* 0x0518 */
210 u64 ioc_syserr_enable; /* 0x0520 */
211 u64 ioc_fir; /* 0x0528 */
212
213 u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */
214};
215
216extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
217extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
218
219
220struct cbe_mic_tm_regs {
221 u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */
222
223 u64 mic_ctl_cnfg2; /* 0x0040 */
224#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL
225#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL
226#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL
227#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL
228
229 u64 pad_0x0048; /* 0x0048 */
230
231 u64 mic_aux_trc_base; /* 0x0050 */
232 u64 mic_aux_trc_max_addr; /* 0x0058 */
233 u64 mic_aux_trc_cur_addr; /* 0x0060 */
234 u64 mic_aux_trc_grf_addr; /* 0x0068 */
235 u64 mic_aux_trc_grf_data; /* 0x0070 */
236
237 u64 pad_0x0078; /* 0x0078 */
238
239 u64 mic_ctl_cnfg_0; /* 0x0080 */
240#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL
241
242 u64 pad_0x0088; /* 0x0088 */
243
244 u64 slow_fast_timer_0; /* 0x0090 */
245 u64 slow_next_timer_0; /* 0x0098 */
246
247 u8 pad_0x00a0_0x01c0[0x01c0 - 0x0a0]; /* 0x00a0 */
248
249 u64 mic_ctl_cnfg_1; /* 0x01c0 */
250#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL
251 u64 pad_0x01c8; /* 0x01c8 */
252
253 u64 slow_fast_timer_1; /* 0x01d0 */
254 u64 slow_next_timer_1; /* 0x01d8 */
255
256 u8 pad_0x01e0_0x1000[0x1000 - 0x01e0]; /* 0x01e0 */
257};
258
259extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
260extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
261
262/* some utility functions to deal with SMT */
263extern u32 cbe_get_hw_thread_id(int cpu);
264extern u32 cbe_cpu_to_node(int cpu);
265extern u32 cbe_node_to_cpu(int node);
266
267/* Init this module early */
268extern void cbe_regs_init(void);
269
270
271#endif /* CBE_REGS_H */
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index fb5eda48467..4852bf312d8 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -52,8 +52,8 @@
52#include <asm/spu.h> 52#include <asm/spu.h>
53#include <asm/io.h> 53#include <asm/io.h>
54#include <asm/prom.h> 54#include <asm/prom.h>
55#include <asm/cell-regs.h>
55 56
56#include "cbe_regs.h"
57#include "spu_priv1_mmio.h" 57#include "spu_priv1_mmio.h"
58 58
59#define TEMP_MIN 65 59#define TEMP_MIN 65
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 47264e72202..151fd8b82d6 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -41,9 +41,9 @@
41#include <asm/prom.h> 41#include <asm/prom.h>
42#include <asm/ptrace.h> 42#include <asm/ptrace.h>
43#include <asm/machdep.h> 43#include <asm/machdep.h>
44#include <asm/cell-regs.h>
44 45
45#include "interrupt.h" 46#include "interrupt.h"
46#include "cbe_regs.h"
47 47
48struct iic { 48struct iic {
49 struct cbe_iic_thread_regs __iomem *regs; 49 struct cbe_iic_thread_regs __iomem *regs;
@@ -381,7 +381,7 @@ static int __init setup_iic(void)
381void __init iic_init_IRQ(void) 381void __init iic_init_IRQ(void)
382{ 382{
383 /* Setup an irq host data structure */ 383 /* Setup an irq host data structure */
384 iic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT, 384 iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
385 &iic_host_ops, IIC_IRQ_INVALID); 385 &iic_host_ops, IIC_IRQ_INVALID);
386 BUG_ON(iic_host == NULL); 386 BUG_ON(iic_host == NULL);
387 irq_set_default_host(iic_host); 387 irq_set_default_host(iic_host);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 760caa76841..faabc3fdc13 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -34,8 +34,8 @@
34#include <asm/udbg.h> 34#include <asm/udbg.h>
35#include <asm/of_platform.h> 35#include <asm/of_platform.h>
36#include <asm/lmb.h> 36#include <asm/lmb.h>
37#include <asm/cell-regs.h>
37 38
38#include "cbe_regs.h"
39#include "interrupt.h" 39#include "interrupt.h"
40 40
41/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages 41/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 4ede22d363f..0304589c0a8 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -34,9 +34,9 @@
34#include <asm/prom.h> 34#include <asm/prom.h>
35#include <asm/pgtable.h> 35#include <asm/pgtable.h>
36#include <asm/reg.h> 36#include <asm/reg.h>
37#include <asm/cell-regs.h>
37 38
38#include "pervasive.h" 39#include "pervasive.h"
39#include "cbe_regs.h"
40 40
41static int sysreset_hack; 41static int sysreset_hack;
42 42
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 66ca4b5a1db..1ed30367888 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -30,8 +30,8 @@
30#include <asm/pmc.h> 30#include <asm/pmc.h>
31#include <asm/reg.h> 31#include <asm/reg.h>
32#include <asm/spu.h> 32#include <asm/spu.h>
33#include <asm/cell-regs.h>
33 34
34#include "cbe_regs.h"
35#include "interrupt.h" 35#include "interrupt.h"
36 36
37/* 37/*
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 3961a085b43..b2494ebcdbe 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -10,9 +10,9 @@
10#include <asm/prom.h> 10#include <asm/prom.h>
11#include <asm/machdep.h> 11#include <asm/machdep.h>
12#include <asm/rtas.h> 12#include <asm/rtas.h>
13#include <asm/cell-regs.h>
13 14
14#include "ras.h" 15#include "ras.h"
15#include "cbe_regs.h"
16 16
17 17
18static void dump_fir(int cpu) 18static void dump_fir(int cpu)
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index db6654272e1..98e7ef8e6fc 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -52,9 +52,9 @@
52#include <asm/udbg.h> 52#include <asm/udbg.h>
53#include <asm/mpic.h> 53#include <asm/mpic.h>
54#include <asm/of_platform.h> 54#include <asm/of_platform.h>
55#include <asm/cell-regs.h>
55 56
56#include "interrupt.h" 57#include "interrupt.h"
57#include "cbe_regs.h"
58#include "pervasive.h" 58#include "pervasive.h"
59#include "ras.h" 59#include "ras.h"
60 60
@@ -83,12 +83,22 @@ static void cell_progress(char *s, unsigned short hex)
83 83
84static int __init cell_publish_devices(void) 84static int __init cell_publish_devices(void)
85{ 85{
86 int node;
87
86 if (!machine_is(cell)) 88 if (!machine_is(cell))
87 return 0; 89 return 0;
88 90
89 /* Publish OF platform devices for southbridge IOs */ 91 /* Publish OF platform devices for southbridge IOs */
90 of_platform_bus_probe(NULL, NULL, NULL); 92 of_platform_bus_probe(NULL, NULL, NULL);
91 93
94 /* There is no device for the MIC memory controller, thus we create
95 * a platform device for it to attach the EDAC driver to.
96 */
97 for_each_online_node(node) {
98 if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
99 continue;
100 platform_device_register_simple("cbe-mic", node, NULL, 0);
101 }
92 return 0; 102 return 0;
93} 103}
94device_initcall(cell_publish_devices); 104device_initcall(cell_publish_devices);
@@ -161,11 +171,6 @@ static void __init cell_setup_arch(void)
161 /* init to some ~sane value until calibrate_delay() runs */ 171 /* init to some ~sane value until calibrate_delay() runs */
162 loops_per_jiffy = 50000000; 172 loops_per_jiffy = 50000000;
163 173
164 if (ROOT_DEV == 0) {
165 printk("No ramdisk, default root is /dev/hda2\n");
166 ROOT_DEV = Root_HDA2;
167 }
168
169 /* Find and initialize PCI host bridges */ 174 /* Find and initialize PCI host bridges */
170 init_pci_config_tokens(); 175 init_pci_config_tokens();
171 find_and_init_phbs(); 176 find_and_init_phbs();
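
The "cbe-mic" platform devices registered in cell_publish_devices() above exist only so a memory-controller EDAC driver has something to bind to. As a hedged illustration (the probe routine name is an assumption, not shown in this patch), such a driver matches purely on the device name:

static struct platform_driver cbe_mic_edac_driver = {
	.driver	= {
		/* matches platform_device_register_simple("cbe-mic", ...) above */
		.name	= "cbe-mic",
	},
	.probe	= cbe_mic_edac_probe,	/* assumed probe routine */
};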
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 05f4b3d3d75..3f4b4aef756 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -63,7 +63,6 @@ enum {
63 63
64struct spider_pic { 64struct spider_pic {
65 struct irq_host *host; 65 struct irq_host *host;
66 struct device_node *of_node;
67 void __iomem *regs; 66 void __iomem *regs;
68 unsigned int node_id; 67 unsigned int node_id;
69}; 68};
@@ -176,12 +175,6 @@ static struct irq_chip spider_pic = {
176 .set_type = spider_set_irq_type, 175 .set_type = spider_set_irq_type,
177}; 176};
178 177
179static int spider_host_match(struct irq_host *h, struct device_node *node)
180{
181 struct spider_pic *pic = h->host_data;
182 return node == pic->of_node;
183}
184
185static int spider_host_map(struct irq_host *h, unsigned int virq, 178static int spider_host_map(struct irq_host *h, unsigned int virq,
186 irq_hw_number_t hw) 179 irq_hw_number_t hw)
187{ 180{
@@ -208,7 +201,6 @@ static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
208} 201}
209 202
210static struct irq_host_ops spider_host_ops = { 203static struct irq_host_ops spider_host_ops = {
211 .match = spider_host_match,
212 .map = spider_host_map, 204 .map = spider_host_map,
213 .xlate = spider_host_xlate, 205 .xlate = spider_host_xlate,
214}; 206};
@@ -247,18 +239,18 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
247 * tree in case the device-tree is ever fixed 239 * tree in case the device-tree is ever fixed
248 */ 240 */
249 struct of_irq oirq; 241 struct of_irq oirq;
250 if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) { 242 if (of_irq_map_one(pic->host->of_node, 0, &oirq) == 0) {
251 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 243 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
252 oirq.size); 244 oirq.size);
253 return virq; 245 return virq;
254 } 246 }
255 247
256 /* Now do the horrible hacks */ 248 /* Now do the horrible hacks */
257 tmp = of_get_property(pic->of_node, "#interrupt-cells", NULL); 249 tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL);
258 if (tmp == NULL) 250 if (tmp == NULL)
259 return NO_IRQ; 251 return NO_IRQ;
260 intsize = *tmp; 252 intsize = *tmp;
261 imap = of_get_property(pic->of_node, "interrupt-map", &imaplen); 253 imap = of_get_property(pic->host->of_node, "interrupt-map", &imaplen);
262 if (imap == NULL || imaplen < (intsize + 1)) 254 if (imap == NULL || imaplen < (intsize + 1))
263 return NO_IRQ; 255 return NO_IRQ;
264 iic = of_find_node_by_phandle(imap[intsize]); 256 iic = of_find_node_by_phandle(imap[intsize]);
@@ -308,15 +300,13 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
308 panic("spider_pic: can't map registers !"); 300 panic("spider_pic: can't map registers !");
309 301
310 /* Allocate a host */ 302 /* Allocate a host */
311 pic->host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, SPIDER_SRC_COUNT, 303 pic->host = irq_alloc_host(of_node_get(of_node), IRQ_HOST_MAP_LINEAR,
312 &spider_host_ops, SPIDER_IRQ_INVALID); 304 SPIDER_SRC_COUNT, &spider_host_ops,
305 SPIDER_IRQ_INVALID);
313 if (pic->host == NULL) 306 if (pic->host == NULL)
314 panic("spider_pic: can't allocate irq host !"); 307 panic("spider_pic: can't allocate irq host !");
315 pic->host->host_data = pic; 308 pic->host->host_data = pic;
316 309
317 /* Fill out other bits */
318 pic->of_node = of_node_get(of_node);
319
320 /* Go through all sources and disable them */ 310 /* Go through all sources and disable them */
321 for (i = 0; i < SPIDER_SRC_COUNT; i++) { 311 for (i = 0; i < SPIDER_SRC_COUNT; i++) {
322 void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; 312 void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 106d2921e2d..c83c3e3f517 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -168,7 +168,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
168#else 168#else
169 psize = mm->context.user_psize; 169 psize = mm->context.user_psize;
170#endif 170#endif
171 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | 171 vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
172 SLB_VSID_USER; 172 SLB_VSID_USER;
173 break; 173 break;
174 case VMALLOC_REGION_ID: 174 case VMALLOC_REGION_ID:
@@ -176,12 +176,12 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
176 psize = mmu_vmalloc_psize; 176 psize = mmu_vmalloc_psize;
177 else 177 else
178 psize = mmu_io_psize; 178 psize = mmu_io_psize;
179 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | 179 vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
180 SLB_VSID_KERNEL; 180 SLB_VSID_KERNEL;
181 break; 181 break;
182 case KERNEL_REGION_ID: 182 case KERNEL_REGION_ID:
183 psize = mmu_linear_psize; 183 psize = mmu_linear_psize;
184 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | 184 vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
185 SLB_VSID_KERNEL; 185 SLB_VSID_KERNEL;
186 break; 186 break;
187 default: 187 default:
@@ -458,7 +458,7 @@ static int spu_shutdown(struct sys_device *sysdev)
458 return 0; 458 return 0;
459} 459}
460 460
461struct sysdev_class spu_sysdev_class = { 461static struct sysdev_class spu_sysdev_class = {
462 set_kset_name("spu"), 462 set_kset_name("spu"),
463 .shutdown = spu_shutdown, 463 .shutdown = spu_shutdown,
464}; 464};
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index 47ec3be3edc..dceb8b6a938 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -2,7 +2,7 @@
2 * System call callback functions for SPUs 2 * System call callback functions for SPUs
3 */ 3 */
4 4
5#define DEBUG 5#undef DEBUG
6 6
7#include <linux/kallsyms.h> 7#include <linux/kallsyms.h>
8#include <linux/module.h> 8#include <linux/module.h>
@@ -33,7 +33,7 @@
33 * mbind, mq_open, ipc, ... 33 * mbind, mq_open, ipc, ...
34 */ 34 */
35 35
36void *spu_syscall_table[] = { 36static void *spu_syscall_table[] = {
37#define SYSCALL(func) sys_ni_syscall, 37#define SYSCALL(func) sys_ni_syscall,
38#define COMPAT_SYS(func) sys_ni_syscall, 38#define COMPAT_SYS(func) sys_ni_syscall,
39#define PPC_SYS(func) sys_ni_syscall, 39#define PPC_SYS(func) sys_ni_syscall,
diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
deleted file mode 100644
index 4fd37ff1e21..00000000000
--- a/arch/powerpc/platforms/cell/spu_coredump.c
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * SPU core dump code
3 *
4 * (C) Copyright 2006 IBM Corp.
5 *
6 * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/file.h>
24#include <linux/module.h>
25#include <linux/syscalls.h>
26
27#include <asm/spu.h>
28
29static struct spu_coredump_calls *spu_coredump_calls;
30static DEFINE_MUTEX(spu_coredump_mutex);
31
32int arch_notes_size(void)
33{
34 long ret;
35
36 ret = -ENOSYS;
37 mutex_lock(&spu_coredump_mutex);
38 if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
39 ret = spu_coredump_calls->arch_notes_size();
40 module_put(spu_coredump_calls->owner);
41 }
42 mutex_unlock(&spu_coredump_mutex);
43 return ret;
44}
45
46void arch_write_notes(struct file *file)
47{
48 mutex_lock(&spu_coredump_mutex);
49 if (spu_coredump_calls && try_module_get(spu_coredump_calls->owner)) {
50 spu_coredump_calls->arch_write_notes(file);
51 module_put(spu_coredump_calls->owner);
52 }
53 mutex_unlock(&spu_coredump_mutex);
54}
55
56int register_arch_coredump_calls(struct spu_coredump_calls *calls)
57{
58 int ret = 0;
59
60
61 mutex_lock(&spu_coredump_mutex);
62 if (spu_coredump_calls)
63 ret = -EBUSY;
64 else
65 spu_coredump_calls = calls;
66 mutex_unlock(&spu_coredump_mutex);
67 return ret;
68}
69EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
70
71void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
72{
73 BUG_ON(spu_coredump_calls != calls);
74
75 mutex_lock(&spu_coredump_mutex);
76 spu_coredump_calls = NULL;
77 mutex_unlock(&spu_coredump_mutex);
78}
79EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 0e14f532500..1b010707488 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -377,10 +377,10 @@ static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
377static struct spu *spu_lookup_reg(int node, u32 reg) 377static struct spu *spu_lookup_reg(int node, u32 reg)
378{ 378{
379 struct spu *spu; 379 struct spu *spu;
380 u32 *spu_reg; 380 const u32 *spu_reg;
381 381
382 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { 382 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
383 spu_reg = (u32*)of_get_property(spu_devnode(spu), "reg", NULL); 383 spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
384 if (*spu_reg == reg) 384 if (*spu_reg == reg)
385 return spu; 385 return spu;
386 } 386 }
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 027ac32cc63..a9438b719fe 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -2,6 +2,7 @@
2 * SPU file system -- system call stubs 2 * SPU file system -- system call stubs
3 * 3 *
4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5 * (C) Copyright 2006-2007, IBM Corporation
5 * 6 *
6 * Author: Arnd Bergmann <arndb@de.ibm.com> 7 * Author: Arnd Bergmann <arndb@de.ibm.com>
7 * 8 *
@@ -20,44 +21,73 @@
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */ 22 */
22#include <linux/file.h> 23#include <linux/file.h>
24#include <linux/fs.h>
23#include <linux/module.h> 25#include <linux/module.h>
24#include <linux/syscalls.h> 26#include <linux/syscalls.h>
27#include <linux/rcupdate.h>
25 28
26#include <asm/spu.h> 29#include <asm/spu.h>
27 30
28struct spufs_calls spufs_calls = { 31/* protected by rcu */
29 .owner = NULL, 32static struct spufs_calls *spufs_calls;
30};
31 33
32/* These stub syscalls are needed to have the actual implementation 34#ifdef CONFIG_SPU_FS_MODULE
33 * within a loadable module. When spufs is built into the kernel, 35
34 * this file is not used and the syscalls directly enter the fs code */ 36static inline struct spufs_calls *spufs_calls_get(void)
37{
38 struct spufs_calls *calls = NULL;
39
40 rcu_read_lock();
41 calls = rcu_dereference(spufs_calls);
42 if (calls && !try_module_get(calls->owner))
43 calls = NULL;
44 rcu_read_unlock();
45
46 return calls;
47}
48
49static inline void spufs_calls_put(struct spufs_calls *calls)
50{
51 BUG_ON(calls != spufs_calls);
52
53 /* we don't need to rcu this, as we hold a reference to the module */
54 module_put(spufs_calls->owner);
55}
56
57#else /* !defined CONFIG_SPU_FS_MODULE */
58
59static inline struct spufs_calls *spufs_calls_get(void)
60{
61 return spufs_calls;
62}
63
64static inline void spufs_calls_put(struct spufs_calls *calls) { }
65
66#endif /* CONFIG_SPU_FS_MODULE */
35 67
36asmlinkage long sys_spu_create(const char __user *name, 68asmlinkage long sys_spu_create(const char __user *name,
37 unsigned int flags, mode_t mode, int neighbor_fd) 69 unsigned int flags, mode_t mode, int neighbor_fd)
38{ 70{
39 long ret; 71 long ret;
40 struct module *owner = spufs_calls.owner;
41 struct file *neighbor; 72 struct file *neighbor;
42 int fput_needed; 73 int fput_needed;
74 struct spufs_calls *calls;
43 75
44 ret = -ENOSYS; 76 calls = spufs_calls_get();
45 if (owner && try_module_get(owner)) { 77 if (!calls)
46 if (flags & SPU_CREATE_AFFINITY_SPU) { 78 return -ENOSYS;
47 neighbor = fget_light(neighbor_fd, &fput_needed); 79
48 ret = -EBADF; 80 if (flags & SPU_CREATE_AFFINITY_SPU) {
49 if (neighbor) { 81 ret = -EBADF;
50 ret = spufs_calls.create_thread(name, flags, 82 neighbor = fget_light(neighbor_fd, &fput_needed);
51 mode, neighbor); 83 if (neighbor) {
52 fput_light(neighbor, fput_needed); 84 ret = calls->create_thread(name, flags, mode, neighbor);
53 } 85 fput_light(neighbor, fput_needed);
54 }
55 else {
56 ret = spufs_calls.create_thread(name, flags,
57 mode, NULL);
58 } 86 }
59 module_put(owner); 87 } else
60 } 88 ret = calls->create_thread(name, flags, mode, NULL);
89
90 spufs_calls_put(calls);
61 return ret; 91 return ret;
62} 92}
63 93
@@ -66,37 +96,69 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
66 long ret; 96 long ret;
67 struct file *filp; 97 struct file *filp;
68 int fput_needed; 98 int fput_needed;
69 struct module *owner = spufs_calls.owner; 99 struct spufs_calls *calls;
70 100
71 ret = -ENOSYS; 101 calls = spufs_calls_get();
72 if (owner && try_module_get(owner)) { 102 if (!calls)
73 ret = -EBADF; 103 return -ENOSYS;
74 filp = fget_light(fd, &fput_needed); 104
75 if (filp) { 105 ret = -EBADF;
76 ret = spufs_calls.spu_run(filp, unpc, ustatus); 106 filp = fget_light(fd, &fput_needed);
77 fput_light(filp, fput_needed); 107 if (filp) {
78 } 108 ret = calls->spu_run(filp, unpc, ustatus);
79 module_put(owner); 109 fput_light(filp, fput_needed);
80 } 110 }
111
112 spufs_calls_put(calls);
113 return ret;
114}
115
116int elf_coredump_extra_notes_size(void)
117{
118 struct spufs_calls *calls;
119 int ret;
120
121 calls = spufs_calls_get();
122 if (!calls)
123 return 0;
124
125 ret = calls->coredump_extra_notes_size();
126
127 spufs_calls_put(calls);
128
129 return ret;
130}
131
132int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset)
133{
134 struct spufs_calls *calls;
135 int ret;
136
137 calls = spufs_calls_get();
138 if (!calls)
139 return 0;
140
141 ret = calls->coredump_extra_notes_write(file, foffset);
142
143 spufs_calls_put(calls);
144
81 return ret; 145 return ret;
82} 146}
83 147
84int register_spu_syscalls(struct spufs_calls *calls) 148int register_spu_syscalls(struct spufs_calls *calls)
85{ 149{
86 if (spufs_calls.owner) 150 if (spufs_calls)
87 return -EBUSY; 151 return -EBUSY;
88 152
89 spufs_calls.create_thread = calls->create_thread; 153 rcu_assign_pointer(spufs_calls, calls);
90 spufs_calls.spu_run = calls->spu_run;
91 smp_mb();
92 spufs_calls.owner = calls->owner;
93 return 0; 154 return 0;
94} 155}
95EXPORT_SYMBOL_GPL(register_spu_syscalls); 156EXPORT_SYMBOL_GPL(register_spu_syscalls);
96 157
97void unregister_spu_syscalls(struct spufs_calls *calls) 158void unregister_spu_syscalls(struct spufs_calls *calls)
98{ 159{
99 BUG_ON(spufs_calls.owner != calls->owner); 160 BUG_ON(spufs_calls->owner != calls->owner);
100 spufs_calls.owner = NULL; 161 rcu_assign_pointer(spufs_calls, NULL);
162 synchronize_rcu();
101} 163}
102EXPORT_SYMBOL_GPL(unregister_spu_syscalls); 164EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
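
For reference, the module side that pairs with this RCU-protected table lives in spufs proper and is outside this file: it registers a filled-in struct spufs_calls at init and unregisters it on exit, after which unregister_spu_syscalls() clears the pointer and synchronize_rcu() guarantees no syscall is still dereferencing the old table before the module text goes away. A sketch, where the spufs_create/spufs_run_spu handler names are assumptions:

static struct spufs_calls my_spufs_calls = {
	.create_thread			= spufs_create,		/* assumed handler names */
	.spu_run			= spufs_run_spu,
	.coredump_extra_notes_size	= spufs_coredump_extra_notes_size,
	.coredump_extra_notes_write	= spufs_coredump_extra_notes_write,
	.owner				= THIS_MODULE,
};

static int __init my_spufs_init(void)
{
	return register_spu_syscalls(&my_spufs_calls);
}

static void __exit my_spufs_exit(void)
{
	/* clears the rcu pointer and waits for in-flight callers */
	unregister_spu_syscalls(&my_spufs_calls);
}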
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 5e31799b1e3..80f62363e1c 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -31,16 +31,7 @@
31 31
32#include "spufs.h" 32#include "spufs.h"
33 33
34struct spufs_ctx_info { 34static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer,
35 struct list_head list;
36 int dfd;
37 int memsize; /* in bytes */
38 struct spu_context *ctx;
39};
40
41static LIST_HEAD(ctx_info_list);
42
43static ssize_t do_coredump_read(int num, struct spu_context *ctx, void __user *buffer,
44 size_t size, loff_t *off) 35 size_t size, loff_t *off)
45{ 36{
46 u64 data; 37 u64 data;
@@ -50,49 +41,57 @@ static ssize_t do_coredump_read(int num, struct spu_context *ctx, void __user *b
50 return spufs_coredump_read[num].read(ctx, buffer, size, off); 41 return spufs_coredump_read[num].read(ctx, buffer, size, off);
51 42
52 data = spufs_coredump_read[num].get(ctx); 43 data = spufs_coredump_read[num].get(ctx);
53 ret = copy_to_user(buffer, &data, 8); 44 ret = snprintf(buffer, size, "0x%.16lx", data);
54 return ret ? -EFAULT : 8; 45 if (ret >= size)
46 return size;
47 return ++ret; /* count trailing NULL */
55} 48}
56 49
57/* 50/*
58 * These are the only things you should do on a core-file: use only these 51 * These are the only things you should do on a core-file: use only these
59 * functions to write out all the necessary info. 52 * functions to write out all the necessary info.
60 */ 53 */
61static int spufs_dump_write(struct file *file, const void *addr, int nr) 54static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset)
62{ 55{
63 return file->f_op->write(file, addr, nr, &file->f_pos) == nr; 56 unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
64} 57 ssize_t written;
65 58
66static int spufs_dump_seek(struct file *file, loff_t off) 59 if (*foffset + nr > limit)
67{ 60 return -EIO;
68 if (file->f_op->llseek) { 61
69 if (file->f_op->llseek(file, off, 0) != off) 62 written = file->f_op->write(file, addr, nr, &file->f_pos);
70 return 0; 63 *foffset += written;
71 } else 64
72 file->f_pos = off; 65 if (written != nr)
73 return 1; 66 return -EIO;
67
68 return 0;
74} 69}
75 70
76static void spufs_fill_memsize(struct spufs_ctx_info *ctx_info) 71static int spufs_dump_align(struct file *file, char *buf, loff_t new_off,
72 loff_t *foffset)
77{ 73{
78 struct spu_context *ctx; 74 int rc, size;
79 unsigned long long lslr; 75
76 size = min((loff_t)PAGE_SIZE, new_off - *foffset);
77 memset(buf, 0, size);
78
79 rc = 0;
80 while (rc == 0 && new_off > *foffset) {
81 size = min((loff_t)PAGE_SIZE, new_off - *foffset);
82 rc = spufs_dump_write(file, buf, size, foffset);
83 }
80 84
81 ctx = ctx_info->ctx; 85 return rc;
82 lslr = ctx->csa.priv2.spu_lslr_RW;
83 ctx_info->memsize = lslr + 1;
84} 86}
85 87
86static int spufs_ctx_note_size(struct spufs_ctx_info *ctx_info) 88static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
87{ 89{
88 int dfd, memsize, i, sz, total = 0; 90 int i, sz, total = 0;
89 char *name; 91 char *name;
90 char fullname[80]; 92 char fullname[80];
91 93
92 dfd = ctx_info->dfd; 94 for (i = 0; spufs_coredump_read[i].name != NULL; i++) {
93 memsize = ctx_info->memsize;
94
95 for (i = 0; spufs_coredump_read[i].name; i++) {
96 name = spufs_coredump_read[i].name; 95 name = spufs_coredump_read[i].name;
97 sz = spufs_coredump_read[i].size; 96 sz = spufs_coredump_read[i].size;
98 97
@@ -100,39 +99,12 @@ static int spufs_ctx_note_size(struct spufs_ctx_info *ctx_info)
100 99
101 total += sizeof(struct elf_note); 100 total += sizeof(struct elf_note);
102 total += roundup(strlen(fullname) + 1, 4); 101 total += roundup(strlen(fullname) + 1, 4);
103 if (!strcmp(name, "mem")) 102 total += roundup(sz, 4);
104 total += roundup(memsize, 4);
105 else
106 total += roundup(sz, 4);
107 } 103 }
108 104
109 return total; 105 return total;
110} 106}
111 107
112static int spufs_add_one_context(struct file *file, int dfd)
113{
114 struct spu_context *ctx;
115 struct spufs_ctx_info *ctx_info;
116 int size;
117
118 ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
119 if (ctx->flags & SPU_CREATE_NOSCHED)
120 return 0;
121
122 ctx_info = kzalloc(sizeof(*ctx_info), GFP_KERNEL);
123 if (unlikely(!ctx_info))
124 return -ENOMEM;
125
126 ctx_info->dfd = dfd;
127 ctx_info->ctx = ctx;
128
129 spufs_fill_memsize(ctx_info);
130
131 size = spufs_ctx_note_size(ctx_info);
132 list_add(&ctx_info->list, &ctx_info_list);
133 return size;
134}
135
136/* 108/*
137 * The additional architecture-specific notes for Cell are various 109 * The additional architecture-specific notes for Cell are various
138 * context files in the spu context. 110 * context files in the spu context.
@@ -142,33 +114,57 @@ static int spufs_add_one_context(struct file *file, int dfd)
142 * internal functionality to dump them without needing to actually 114 * internal functionality to dump them without needing to actually
143 * open the files. 115 * open the files.
144 */ 116 */
145static int spufs_arch_notes_size(void) 117static struct spu_context *coredump_next_context(int *fd)
146{ 118{
147 struct fdtable *fdt = files_fdtable(current->files); 119 struct fdtable *fdt = files_fdtable(current->files);
148 int size = 0, fd; 120 struct file *file;
121 struct spu_context *ctx = NULL;
149 122
150 for (fd = 0; fd < fdt->max_fds; fd++) { 123 for (; *fd < fdt->max_fds; (*fd)++) {
151 if (FD_ISSET(fd, fdt->open_fds)) { 124 if (!FD_ISSET(*fd, fdt->open_fds))
152 struct file *file = fcheck(fd); 125 continue;
153 126
154 if (file && file->f_op == &spufs_context_fops) { 127 file = fcheck(*fd);
155 int rval = spufs_add_one_context(file, fd); 128
156 if (rval < 0) 129 if (!file || file->f_op != &spufs_context_fops)
157 break; 130 continue;
158 size += rval; 131
159 } 132 ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
160 } 133 if (ctx->flags & SPU_CREATE_NOSCHED)
134 continue;
135
136 /* start searching the next fd next time we're called */
137 (*fd)++;
138 break;
161 } 139 }
162 140
163 return size; 141 return ctx;
164} 142}
165 143
166static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i, 144int spufs_coredump_extra_notes_size(void)
167 struct file *file)
168{ 145{
169 struct spu_context *ctx; 146 struct spu_context *ctx;
147 int size = 0, rc, fd;
148
149 fd = 0;
150 while ((ctx = coredump_next_context(&fd)) != NULL) {
151 spu_acquire_saved(ctx);
152 rc = spufs_ctx_note_size(ctx, fd);
153 spu_release_saved(ctx);
154 if (rc < 0)
155 break;
156
157 size += rc;
158 }
159
160 return size;
161}
162
163static int spufs_arch_write_note(struct spu_context *ctx, int i,
164 struct file *file, int dfd, loff_t *foffset)
165{
170 loff_t pos = 0; 166 loff_t pos = 0;
171 int sz, dfd, rc, total = 0; 167 int sz, rc, nread, total = 0;
172 const int bufsz = PAGE_SIZE; 168 const int bufsz = PAGE_SIZE;
173 char *name; 169 char *name;
174 char fullname[80], *buf; 170 char fullname[80], *buf;
@@ -176,64 +172,70 @@ static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
176 172
177 buf = (void *)get_zeroed_page(GFP_KERNEL); 173 buf = (void *)get_zeroed_page(GFP_KERNEL);
178 if (!buf) 174 if (!buf)
179 return; 175 return -ENOMEM;
180 176
181 dfd = ctx_info->dfd;
182 name = spufs_coredump_read[i].name; 177 name = spufs_coredump_read[i].name;
183 178 sz = spufs_coredump_read[i].size;
184 if (!strcmp(name, "mem"))
185 sz = ctx_info->memsize;
186 else
187 sz = spufs_coredump_read[i].size;
188
189 ctx = ctx_info->ctx;
190 if (!ctx)
191 goto out;
192 179
193 sprintf(fullname, "SPU/%d/%s", dfd, name); 180 sprintf(fullname, "SPU/%d/%s", dfd, name);
194 en.n_namesz = strlen(fullname) + 1; 181 en.n_namesz = strlen(fullname) + 1;
195 en.n_descsz = sz; 182 en.n_descsz = sz;
196 en.n_type = NT_SPU; 183 en.n_type = NT_SPU;
197 184
198 if (!spufs_dump_write(file, &en, sizeof(en))) 185 rc = spufs_dump_write(file, &en, sizeof(en), foffset);
186 if (rc)
199 goto out; 187 goto out;
200 if (!spufs_dump_write(file, fullname, en.n_namesz)) 188
189 rc = spufs_dump_write(file, fullname, en.n_namesz, foffset);
190 if (rc)
201 goto out; 191 goto out;
202 if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4))) 192
193 rc = spufs_dump_align(file, buf, roundup(*foffset, 4), foffset);
194 if (rc)
203 goto out; 195 goto out;
204 196
205 do { 197 do {
206 rc = do_coredump_read(i, ctx, buf, bufsz, &pos); 198 nread = do_coredump_read(i, ctx, buf, bufsz, &pos);
207 if (rc > 0) { 199 if (nread > 0) {
208 if (!spufs_dump_write(file, buf, rc)) 200 rc = spufs_dump_write(file, buf, nread, foffset);
201 if (rc)
209 goto out; 202 goto out;
210 total += rc; 203 total += nread;
211 } 204 }
212 } while (rc == bufsz && total < sz); 205 } while (nread == bufsz && total < sz);
206
207 if (nread < 0) {
208 rc = nread;
209 goto out;
210 }
211
212 rc = spufs_dump_align(file, buf, roundup(*foffset - total + sz, 4),
213 foffset);
213 214
214 spufs_dump_seek(file, roundup((unsigned long)file->f_pos
215 - total + sz, 4));
216out: 215out:
217 free_page((unsigned long)buf); 216 free_page((unsigned long)buf);
217 return rc;
218} 218}
219 219
220static void spufs_arch_write_notes(struct file *file) 220int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset)
221{ 221{
222 int j; 222 struct spu_context *ctx;
223 struct spufs_ctx_info *ctx_info, *next; 223 int fd, j, rc;
224 224
225 list_for_each_entry_safe(ctx_info, next, &ctx_info_list, list) { 225 fd = 0;
226 spu_acquire_saved(ctx_info->ctx); 226 while ((ctx = coredump_next_context(&fd)) != NULL) {
227 for (j = 0; j < spufs_coredump_num_notes; j++) 227 spu_acquire_saved(ctx);
228 spufs_arch_write_note(ctx_info, j, file); 228
229 spu_release_saved(ctx_info->ctx); 229 for (j = 0; spufs_coredump_read[j].name != NULL; j++) {
230 list_del(&ctx_info->list); 230 rc = spufs_arch_write_note(ctx, j, file, fd, foffset);
231 kfree(ctx_info); 231 if (rc) {
232 spu_release_saved(ctx);
233 return rc;
234 }
235 }
236
237 spu_release_saved(ctx);
232 } 238 }
233}
234 239
235struct spu_coredump_calls spufs_coredump_calls = { 240 return 0;
236 .arch_notes_size = spufs_arch_notes_size, 241}
237 .arch_write_notes = spufs_arch_write_notes,
238 .owner = THIS_MODULE,
239};
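
The rewritten write path above emits ordinary ELF notes; as a reminder of the layout that the roundup()/spufs_dump_align() calls are aligning to (descriptive only, not code from the patch):

/*
 * On-disk shape of each SPU note emitted by spufs_arch_write_note():
 *
 *   struct elf_note  { n_namesz, n_descsz, n_type = NT_SPU }
 *   name             "SPU/<dfd>/<file>", NUL-terminated,
 *                    zero-padded to a 4-byte boundary
 *   desc             raw contents of the context file,
 *                    zero-padded to a 4-byte boundary
 *
 * spufs_dump_align() writes the padding bytes, and *foffset tracks the
 * cumulative offset so the RLIMIT_CORE check in spufs_dump_write()
 * stays accurate even though file->f_pos is used for the actual write.
 */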
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 7de4e919687..d72b16d6816 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -199,9 +199,9 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
199} 199}
200 200
201#ifdef CONFIG_SPU_FS_64K_LS 201#ifdef CONFIG_SPU_FS_64K_LS
202unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr, 202static unsigned long spufs_get_unmapped_area(struct file *file,
203 unsigned long len, unsigned long pgoff, 203 unsigned long addr, unsigned long len, unsigned long pgoff,
204 unsigned long flags) 204 unsigned long flags)
205{ 205{
206 struct spu_context *ctx = file->private_data; 206 struct spu_context *ctx = file->private_data;
207 struct spu_state *csa = &ctx->csa; 207 struct spu_state *csa = &ctx->csa;
@@ -1076,6 +1076,36 @@ static const struct file_operations spufs_signal2_nosched_fops = {
1076 .mmap = spufs_signal2_mmap, 1076 .mmap = spufs_signal2_mmap,
1077}; 1077};
1078 1078
1079/*
1080 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
1081 * work of acquiring (or not) the SPU context before calling through
1082 * to the actual get routine. The set routine is called directly.
1083 */
1084#define SPU_ATTR_NOACQUIRE 0
1085#define SPU_ATTR_ACQUIRE 1
1086#define SPU_ATTR_ACQUIRE_SAVED 2
1087
1088#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
1089static u64 __##__get(void *data) \
1090{ \
1091 struct spu_context *ctx = data; \
1092 u64 ret; \
1093 \
1094 if (__acquire == SPU_ATTR_ACQUIRE) { \
1095 spu_acquire(ctx); \
1096 ret = __get(ctx); \
1097 spu_release(ctx); \
1098 } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \
1099 spu_acquire_saved(ctx); \
1100 ret = __get(ctx); \
1101 spu_release_saved(ctx); \
1102 } else \
1103 ret = __get(ctx); \
1104 \
1105 return ret; \
1106} \
1107DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1108
1079static void spufs_signal1_type_set(void *data, u64 val) 1109static void spufs_signal1_type_set(void *data, u64 val)
1080{ 1110{
1081 struct spu_context *ctx = data; 1111 struct spu_context *ctx = data;
@@ -1085,25 +1115,13 @@ static void spufs_signal1_type_set(void *data, u64 val)
1085 spu_release(ctx); 1115 spu_release(ctx);
1086} 1116}
1087 1117
1088static u64 __spufs_signal1_type_get(void *data) 1118static u64 spufs_signal1_type_get(struct spu_context *ctx)
1089{ 1119{
1090 struct spu_context *ctx = data;
1091 return ctx->ops->signal1_type_get(ctx); 1120 return ctx->ops->signal1_type_get(ctx);
1092} 1121}
1122DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1123 spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
1093 1124
1094static u64 spufs_signal1_type_get(void *data)
1095{
1096 struct spu_context *ctx = data;
1097 u64 ret;
1098
1099 spu_acquire(ctx);
1100 ret = __spufs_signal1_type_get(data);
1101 spu_release(ctx);
1102
1103 return ret;
1104}
1105DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1106 spufs_signal1_type_set, "%llu");
1107 1125
1108static void spufs_signal2_type_set(void *data, u64 val) 1126static void spufs_signal2_type_set(void *data, u64 val)
1109{ 1127{
@@ -1114,25 +1132,12 @@ static void spufs_signal2_type_set(void *data, u64 val)
1114 spu_release(ctx); 1132 spu_release(ctx);
1115} 1133}
1116 1134
1117static u64 __spufs_signal2_type_get(void *data) 1135static u64 spufs_signal2_type_get(struct spu_context *ctx)
1118{ 1136{
1119 struct spu_context *ctx = data;
1120 return ctx->ops->signal2_type_get(ctx); 1137 return ctx->ops->signal2_type_get(ctx);
1121} 1138}
1122 1139DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1123static u64 spufs_signal2_type_get(void *data) 1140 spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
1124{
1125 struct spu_context *ctx = data;
1126 u64 ret;
1127
1128 spu_acquire(ctx);
1129 ret = __spufs_signal2_type_get(data);
1130 spu_release(ctx);
1131
1132 return ret;
1133}
1134DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1135 spufs_signal2_type_set, "%llu");
1136 1141
1137#if SPUFS_MMAP_4K 1142#if SPUFS_MMAP_4K
1138static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma, 1143static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
@@ -1608,17 +1613,12 @@ static void spufs_npc_set(void *data, u64 val)
1608 spu_release(ctx); 1613 spu_release(ctx);
1609} 1614}
1610 1615
1611static u64 spufs_npc_get(void *data) 1616static u64 spufs_npc_get(struct spu_context *ctx)
1612{ 1617{
1613 struct spu_context *ctx = data; 1618 return ctx->ops->npc_read(ctx);
1614 u64 ret;
1615 spu_acquire(ctx);
1616 ret = ctx->ops->npc_read(ctx);
1617 spu_release(ctx);
1618 return ret;
1619} 1619}
1620DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, 1620DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1621 "0x%llx\n") 1621 "0x%llx\n", SPU_ATTR_ACQUIRE);
1622 1622
1623static void spufs_decr_set(void *data, u64 val) 1623static void spufs_decr_set(void *data, u64 val)
1624{ 1624{
@@ -1629,24 +1629,13 @@ static void spufs_decr_set(void *data, u64 val)
1629 spu_release_saved(ctx); 1629 spu_release_saved(ctx);
1630} 1630}
1631 1631
1632static u64 __spufs_decr_get(void *data) 1632static u64 spufs_decr_get(struct spu_context *ctx)
1633{ 1633{
1634 struct spu_context *ctx = data;
1635 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1634 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1636 return lscsa->decr.slot[0]; 1635 return lscsa->decr.slot[0];
1637} 1636}
1638 1637DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1639static u64 spufs_decr_get(void *data) 1638 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1640{
1641 struct spu_context *ctx = data;
1642 u64 ret;
1643 spu_acquire_saved(ctx);
1644 ret = __spufs_decr_get(data);
1645 spu_release_saved(ctx);
1646 return ret;
1647}
1648DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1649 "0x%llx\n")
1650 1639
1651static void spufs_decr_status_set(void *data, u64 val) 1640static void spufs_decr_status_set(void *data, u64 val)
1652{ 1641{
@@ -1659,26 +1648,16 @@ static void spufs_decr_status_set(void *data, u64 val)
1659 spu_release_saved(ctx); 1648 spu_release_saved(ctx);
1660} 1649}
1661 1650
1662static u64 __spufs_decr_status_get(void *data) 1651static u64 spufs_decr_status_get(struct spu_context *ctx)
1663{ 1652{
1664 struct spu_context *ctx = data;
1665 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) 1653 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1666 return SPU_DECR_STATUS_RUNNING; 1654 return SPU_DECR_STATUS_RUNNING;
1667 else 1655 else
1668 return 0; 1656 return 0;
1669} 1657}
1670 1658DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1671static u64 spufs_decr_status_get(void *data) 1659 spufs_decr_status_set, "0x%llx\n",
1672{ 1660 SPU_ATTR_ACQUIRE_SAVED);
1673 struct spu_context *ctx = data;
1674 u64 ret;
1675 spu_acquire_saved(ctx);
1676 ret = __spufs_decr_status_get(data);
1677 spu_release_saved(ctx);
1678 return ret;
1679}
1680DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1681 spufs_decr_status_set, "0x%llx\n")
1682 1661
1683static void spufs_event_mask_set(void *data, u64 val) 1662static void spufs_event_mask_set(void *data, u64 val)
1684{ 1663{
@@ -1689,28 +1668,18 @@ static void spufs_event_mask_set(void *data, u64 val)
1689 spu_release_saved(ctx); 1668 spu_release_saved(ctx);
1690} 1669}
1691 1670
1692static u64 __spufs_event_mask_get(void *data) 1671static u64 spufs_event_mask_get(struct spu_context *ctx)
1693{ 1672{
1694 struct spu_context *ctx = data;
1695 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1673 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1696 return lscsa->event_mask.slot[0]; 1674 return lscsa->event_mask.slot[0];
1697} 1675}
1698 1676
1699static u64 spufs_event_mask_get(void *data) 1677DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1700{ 1678 spufs_event_mask_set, "0x%llx\n",
1701 struct spu_context *ctx = data; 1679 SPU_ATTR_ACQUIRE_SAVED);
1702 u64 ret;
1703 spu_acquire_saved(ctx);
1704 ret = __spufs_event_mask_get(data);
1705 spu_release_saved(ctx);
1706 return ret;
1707}
1708DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1709 spufs_event_mask_set, "0x%llx\n")
1710 1680
1711static u64 __spufs_event_status_get(void *data) 1681static u64 spufs_event_status_get(struct spu_context *ctx)
1712{ 1682{
1713 struct spu_context *ctx = data;
1714 struct spu_state *state = &ctx->csa; 1683 struct spu_state *state = &ctx->csa;
1715 u64 stat; 1684 u64 stat;
1716 stat = state->spu_chnlcnt_RW[0]; 1685 stat = state->spu_chnlcnt_RW[0];
@@ -1718,19 +1687,8 @@ static u64 __spufs_event_status_get(void *data)
1718 return state->spu_chnldata_RW[0]; 1687 return state->spu_chnldata_RW[0];
1719 return 0; 1688 return 0;
1720} 1689}
1721 1690DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1722static u64 spufs_event_status_get(void *data) 1691 NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1723{
1724 struct spu_context *ctx = data;
1725 u64 ret = 0;
1726
1727 spu_acquire_saved(ctx);
1728 ret = __spufs_event_status_get(data);
1729 spu_release_saved(ctx);
1730 return ret;
1731}
1732DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1733 NULL, "0x%llx\n")
1734 1692
1735static void spufs_srr0_set(void *data, u64 val) 1693static void spufs_srr0_set(void *data, u64 val)
1736{ 1694{
@@ -1741,45 +1699,32 @@ static void spufs_srr0_set(void *data, u64 val)
1741 spu_release_saved(ctx); 1699 spu_release_saved(ctx);
1742} 1700}
1743 1701
1744static u64 spufs_srr0_get(void *data) 1702static u64 spufs_srr0_get(struct spu_context *ctx)
1745{ 1703{
1746 struct spu_context *ctx = data;
1747 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1704 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1748 u64 ret; 1705 return lscsa->srr0.slot[0];
1749 spu_acquire_saved(ctx);
1750 ret = lscsa->srr0.slot[0];
1751 spu_release_saved(ctx);
1752 return ret;
1753} 1706}
1754DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, 1707DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1755 "0x%llx\n") 1708 "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1756 1709
1757static u64 spufs_id_get(void *data) 1710static u64 spufs_id_get(struct spu_context *ctx)
1758{ 1711{
1759 struct spu_context *ctx = data;
1760 u64 num; 1712 u64 num;
1761 1713
1762 spu_acquire(ctx);
1763 if (ctx->state == SPU_STATE_RUNNABLE) 1714 if (ctx->state == SPU_STATE_RUNNABLE)
1764 num = ctx->spu->number; 1715 num = ctx->spu->number;
1765 else 1716 else
1766 num = (unsigned int)-1; 1717 num = (unsigned int)-1;
1767 spu_release(ctx);
1768 1718
1769 return num; 1719 return num;
1770} 1720}
1771DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n") 1721DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1772 1722 SPU_ATTR_ACQUIRE)
1773static u64 __spufs_object_id_get(void *data)
1774{
1775 struct spu_context *ctx = data;
1776 return ctx->object_id;
1777}
1778 1723
1779static u64 spufs_object_id_get(void *data) 1724static u64 spufs_object_id_get(struct spu_context *ctx)
1780{ 1725{
1781 /* FIXME: Should there really be no locking here? */ 1726 /* FIXME: Should there really be no locking here? */
1782 return __spufs_object_id_get(data); 1727 return ctx->object_id;
1783} 1728}
1784 1729
1785static void spufs_object_id_set(void *data, u64 id) 1730static void spufs_object_id_set(void *data, u64 id)
@@ -1788,27 +1733,15 @@ static void spufs_object_id_set(void *data, u64 id)
1788 ctx->object_id = id; 1733 ctx->object_id = id;
1789} 1734}
1790 1735
1791DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, 1736DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1792 spufs_object_id_set, "0x%llx\n"); 1737 spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
1793 1738
1794static u64 __spufs_lslr_get(void *data) 1739static u64 spufs_lslr_get(struct spu_context *ctx)
1795{ 1740{
1796 struct spu_context *ctx = data;
1797 return ctx->csa.priv2.spu_lslr_RW; 1741 return ctx->csa.priv2.spu_lslr_RW;
1798} 1742}
1799 1743DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1800static u64 spufs_lslr_get(void *data) 1744 SPU_ATTR_ACQUIRE_SAVED);
1801{
1802 struct spu_context *ctx = data;
1803 u64 ret;
1804
1805 spu_acquire_saved(ctx);
1806 ret = __spufs_lslr_get(data);
1807 spu_release_saved(ctx);
1808
1809 return ret;
1810}
1811DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1812 1745
1813static int spufs_info_open(struct inode *inode, struct file *file) 1746static int spufs_info_open(struct inode *inode, struct file *file)
1814{ 1747{
@@ -2231,25 +2164,25 @@ struct tree_descr spufs_dir_nosched_contents[] = {
2231}; 2164};
2232 2165
2233struct spufs_coredump_reader spufs_coredump_read[] = { 2166struct spufs_coredump_reader spufs_coredump_read[] = {
2234 { "regs", __spufs_regs_read, NULL, 128 * 16 }, 2167 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
2235 { "fpcr", __spufs_fpcr_read, NULL, 16 }, 2168 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
2236 { "lslr", NULL, __spufs_lslr_get, 11 }, 2169 { "lslr", NULL, spufs_lslr_get, 19 },
2237 { "decr", NULL, __spufs_decr_get, 11 }, 2170 { "decr", NULL, spufs_decr_get, 19 },
2238 { "decr_status", NULL, __spufs_decr_status_get, 11 }, 2171 { "decr_status", NULL, spufs_decr_status_get, 19 },
2239 { "mem", __spufs_mem_read, NULL, 256 * 1024, }, 2172 { "mem", __spufs_mem_read, NULL, LS_SIZE, },
2240 { "signal1", __spufs_signal1_read, NULL, 4 }, 2173 { "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
2241 { "signal1_type", NULL, __spufs_signal1_type_get, 2 }, 2174 { "signal1_type", NULL, spufs_signal1_type_get, 19 },
2242 { "signal2", __spufs_signal2_read, NULL, 4 }, 2175 { "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
2243 { "signal2_type", NULL, __spufs_signal2_type_get, 2 }, 2176 { "signal2_type", NULL, spufs_signal2_type_get, 19 },
2244 { "event_mask", NULL, __spufs_event_mask_get, 8 }, 2177 { "event_mask", NULL, spufs_event_mask_get, 19 },
2245 { "event_status", NULL, __spufs_event_status_get, 8 }, 2178 { "event_status", NULL, spufs_event_status_get, 19 },
2246 { "mbox_info", __spufs_mbox_info_read, NULL, 4 }, 2179 { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
2247 { "ibox_info", __spufs_ibox_info_read, NULL, 4 }, 2180 { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
2248 { "wbox_info", __spufs_wbox_info_read, NULL, 16 }, 2181 { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
2249 { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 }, 2182 { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
2250 { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 }, 2183 { "proxydma_info", __spufs_proxydma_info_read,
2251 { "object-id", NULL, __spufs_object_id_get, 19 }, 2184 NULL, sizeof(struct spu_proxydma_info)},
2252 { }, 2185 { "object-id", NULL, spufs_object_id_get, 19 },
2186 { "npc", NULL, spufs_npc_get, 19 },
2187 { NULL },
2253}; 2188};
2254int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
2255
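Note on the pattern above: the per-attribute getters lose their open-coded spu_acquire*/spu_release* wrappers and take a typed struct spu_context * instead of void *; the locking policy is now carried by the last argument of DEFINE_SPUFS_ATTRIBUTE. The macro itself is defined in an earlier hunk of file.c and is not shown here; a plausible shape for it, assuming it dispatches on the mode constant and then falls through to DEFINE_SIMPLE_ATTRIBUTE, would be:

/* Illustrative sketch only; the real macro and the values of the
 * SPU_ATTR_* constants live in an earlier part of file.c. */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static u64 __##__get(void *data)					\
{									\
	struct spu_context *ctx = data;					\
	u64 ret;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		spu_acquire(ctx);					\
		ret = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {		\
		spu_acquire_saved(ctx);					\
		ret = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else {							\
		ret = __get(ctx);					\
	}								\
	return ret;							\
}									\
DEFINE_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

With something of this shape in place, each DEFINE_SPUFS_ATTRIBUTE() site expands to a small locked getter plus the usual simple-attribute file operations, and the coredump table above can reference the same unlocked spufs_*_get() helpers directly.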
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index b3d0dd118dd..11098747d09 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -43,6 +43,7 @@
43 43
44static struct kmem_cache *spufs_inode_cache; 44static struct kmem_cache *spufs_inode_cache;
45char *isolated_loader; 45char *isolated_loader;
46static int isolated_loader_size;
46 47
47static struct inode * 48static struct inode *
48spufs_alloc_inode(struct super_block *sb) 49spufs_alloc_inode(struct super_block *sb)
@@ -667,7 +668,8 @@ spufs_parse_options(char *options, struct inode *root)
667 668
668static void spufs_exit_isolated_loader(void) 669static void spufs_exit_isolated_loader(void)
669{ 670{
670 kfree(isolated_loader); 671 free_pages((unsigned long) isolated_loader,
672 get_order(isolated_loader_size));
671} 673}
672 674
673static void 675static void
@@ -685,11 +687,12 @@ spufs_init_isolated_loader(void)
685 if (!loader) 687 if (!loader)
686 return; 688 return;
687 689
688 /* kmalloc should align on a 16 byte boundary..* */ 690 /* the loader must be aligned on a 16 byte boundary */
689 isolated_loader = kmalloc(size, GFP_KERNEL); 691 isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
690 if (!isolated_loader) 692 if (!isolated_loader)
691 return; 693 return;
692 694
695 isolated_loader_size = size;
693 memcpy(isolated_loader, loader, size); 696 memcpy(isolated_loader, loader, size);
694 printk(KERN_INFO "spufs: SPU isolation mode enabled\n"); 697 printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
695} 698}
@@ -787,16 +790,11 @@ static int __init spufs_init(void)
787 ret = register_spu_syscalls(&spufs_calls); 790 ret = register_spu_syscalls(&spufs_calls);
788 if (ret) 791 if (ret)
789 goto out_fs; 792 goto out_fs;
790 ret = register_arch_coredump_calls(&spufs_coredump_calls);
791 if (ret)
792 goto out_syscalls;
793 793
794 spufs_init_isolated_loader(); 794 spufs_init_isolated_loader();
795 795
796 return 0; 796 return 0;
797 797
798out_syscalls:
799 unregister_spu_syscalls(&spufs_calls);
800out_fs: 798out_fs:
801 unregister_filesystem(&spufs_type); 799 unregister_filesystem(&spufs_type);
802out_sched: 800out_sched:
@@ -812,7 +810,6 @@ static void __exit spufs_exit(void)
812{ 810{
813 spu_sched_exit(); 811 spu_sched_exit();
814 spufs_exit_isolated_loader(); 812 spufs_exit_isolated_loader();
815 unregister_arch_coredump_calls(&spufs_coredump_calls);
816 unregister_spu_syscalls(&spufs_calls); 813 unregister_spu_syscalls(&spufs_calls);
817 unregister_filesystem(&spufs_type); 814 unregister_filesystem(&spufs_type);
818 kmem_cache_destroy(spufs_inode_cache); 815 kmem_cache_destroy(spufs_inode_cache);
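The inode.c change trades kmalloc()/kfree() for __get_free_pages()/free_pages() because kmalloc() gives no alignment guarantee the isolated loader can rely on, while a page-backed allocation is naturally page aligned; the size is stashed so the free path can recompute the same order. A minimal sketch of that pairing, using hypothetical helper names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

static char *loader_copy;	/* illustrative stand-ins for */
static int loader_copy_size;	/* isolated_loader{,_size}    */

static int copy_loader(const void *src, int size)
{
	/* page-backed: the returned block is page aligned, which more
	 * than satisfies the loader's 16-byte requirement */
	loader_copy = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!loader_copy)
		return -ENOMEM;

	loader_copy_size = size;
	memcpy(loader_copy, src, size);
	return 0;
}

static void release_loader(void)
{
	/* get_order() must be fed the same size used at allocation time */
	free_pages((unsigned long)loader_copy, get_order(loader_copy_size));
}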
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 958f10e90fd..1ce5e22ea5f 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -205,7 +205,7 @@ static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
205 * This means we can only do a very rough approximation of POSIX 205 * This means we can only do a very rough approximation of POSIX
206 * signal semantics. 206 * signal semantics.
207 */ 207 */
208int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret, 208static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
209 unsigned int *npc) 209 unsigned int *npc)
210{ 210{
211 int ret; 211 int ret;
@@ -241,7 +241,7 @@ int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
241 return ret; 241 return ret;
242} 242}
243 243
244int spu_process_callback(struct spu_context *ctx) 244static int spu_process_callback(struct spu_context *ctx)
245{ 245{
246 struct spu_syscall_block s; 246 struct spu_syscall_block s;
247 u32 ls_pointer, npc; 247 u32 ls_pointer, npc;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 5bebe7fbe05..4d257b3f933 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,8 +230,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
230 230
231 if (ctx->flags & SPU_CREATE_NOSCHED) 231 if (ctx->flags & SPU_CREATE_NOSCHED)
232 atomic_inc(&cbe_spu_info[spu->node].reserved_spus); 232 atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
233 if (!list_empty(&ctx->aff_list))
234 atomic_inc(&ctx->gang->aff_sched_count);
235 233
236 ctx->stats.slb_flt_base = spu->stats.slb_flt; 234 ctx->stats.slb_flt_base = spu->stats.slb_flt;
237 ctx->stats.class2_intr_base = spu->stats.class2_intr; 235 ctx->stats.class2_intr_base = spu->stats.class2_intr;
@@ -392,7 +390,6 @@ static int has_affinity(struct spu_context *ctx)
392 if (list_empty(&ctx->aff_list)) 390 if (list_empty(&ctx->aff_list))
393 return 0; 391 return 0;
394 392
395 mutex_lock(&gang->aff_mutex);
396 if (!gang->aff_ref_spu) { 393 if (!gang->aff_ref_spu) {
397 if (!(gang->aff_flags & AFF_MERGED)) 394 if (!(gang->aff_flags & AFF_MERGED))
398 aff_merge_remaining_ctxs(gang); 395 aff_merge_remaining_ctxs(gang);
@@ -400,7 +397,6 @@ static int has_affinity(struct spu_context *ctx)
400 aff_set_offsets(gang); 397 aff_set_offsets(gang);
401 aff_set_ref_point_location(gang); 398 aff_set_ref_point_location(gang);
402 } 399 }
403 mutex_unlock(&gang->aff_mutex);
404 400
405 return gang->aff_ref_spu != NULL; 401 return gang->aff_ref_spu != NULL;
406} 402}
@@ -418,9 +414,16 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
418 414
419 if (spu->ctx->flags & SPU_CREATE_NOSCHED) 415 if (spu->ctx->flags & SPU_CREATE_NOSCHED)
420 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); 416 atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
421 if (!list_empty(&ctx->aff_list)) 417
422 if (atomic_dec_and_test(&ctx->gang->aff_sched_count)) 418 if (ctx->gang){
423 ctx->gang->aff_ref_spu = NULL; 419 mutex_lock(&ctx->gang->aff_mutex);
420 if (has_affinity(ctx)) {
421 if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
422 ctx->gang->aff_ref_spu = NULL;
423 }
424 mutex_unlock(&ctx->gang->aff_mutex);
425 }
426
424 spu_switch_notify(spu, NULL); 427 spu_switch_notify(spu, NULL);
425 spu_unmap_mappings(ctx); 428 spu_unmap_mappings(ctx);
426 spu_save(&ctx->csa, spu); 429 spu_save(&ctx->csa, spu);
@@ -511,20 +514,32 @@ static void spu_prio_wait(struct spu_context *ctx)
511 514
512static struct spu *spu_get_idle(struct spu_context *ctx) 515static struct spu *spu_get_idle(struct spu_context *ctx)
513{ 516{
514 struct spu *spu; 517 struct spu *spu, *aff_ref_spu;
515 int node, n; 518 int node, n;
516 519
517 if (has_affinity(ctx)) { 520 if (ctx->gang) {
518 node = ctx->gang->aff_ref_spu->node; 521 mutex_lock(&ctx->gang->aff_mutex);
522 if (has_affinity(ctx)) {
523 aff_ref_spu = ctx->gang->aff_ref_spu;
524 atomic_inc(&ctx->gang->aff_sched_count);
525 mutex_unlock(&ctx->gang->aff_mutex);
526 node = aff_ref_spu->node;
519 527
520 mutex_lock(&cbe_spu_info[node].list_mutex); 528 mutex_lock(&cbe_spu_info[node].list_mutex);
521 spu = ctx_location(ctx->gang->aff_ref_spu, ctx->aff_offset, node); 529 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
522 if (spu && spu->alloc_state == SPU_FREE) 530 if (spu && spu->alloc_state == SPU_FREE)
523 goto found; 531 goto found;
524 mutex_unlock(&cbe_spu_info[node].list_mutex); 532 mutex_unlock(&cbe_spu_info[node].list_mutex);
525 return NULL;
526 }
527 533
534 mutex_lock(&ctx->gang->aff_mutex);
535 if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
536 ctx->gang->aff_ref_spu = NULL;
537 mutex_unlock(&ctx->gang->aff_mutex);
538
539 return NULL;
540 }
541 mutex_unlock(&ctx->gang->aff_mutex);
542 }
528 node = cpu_to_node(raw_smp_processor_id()); 543 node = cpu_to_node(raw_smp_processor_id());
529 for (n = 0; n < MAX_NUMNODES; n++, node++) { 544 for (n = 0; n < MAX_NUMNODES; n++, node++) {
530 node = (node < MAX_NUMNODES) ? node : 0; 545 node = (node < MAX_NUMNODES) ? node : 0;
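The scheduler hunks above serialise the affinity bookkeeping on gang->aff_mutex: spu_get_idle() now bumps aff_sched_count under the mutex before trying to place the context, and both the unbind path and the failed-placement path drop the count again, clearing aff_ref_spu on the final decrement. Boiled down to a get/put pair on an illustrative structure (not the real struct spu_gang), the pattern is:

#include <linux/mutex.h>
#include <asm/atomic.h>

struct aff_gang {			/* illustrative only */
	struct mutex aff_mutex;
	atomic_t aff_sched_count;
	struct spu *aff_ref_spu;
};

static void aff_sched_get(struct aff_gang *gang)
{
	mutex_lock(&gang->aff_mutex);
	atomic_inc(&gang->aff_sched_count);
	mutex_unlock(&gang->aff_mutex);
}

static void aff_sched_put(struct aff_gang *gang)
{
	mutex_lock(&gang->aff_mutex);
	/* last affinity-scheduled context is gone: forget the reference SPU
	 * so the next placement recomputes it from scratch */
	if (atomic_dec_and_test(&gang->aff_sched_count))
		gang->aff_ref_spu = NULL;
	mutex_unlock(&gang->aff_mutex);
}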
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 2bfdeb8ea8b..ca47b991bda 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -200,9 +200,14 @@ extern struct tree_descr spufs_dir_contents[];
200extern struct tree_descr spufs_dir_nosched_contents[]; 200extern struct tree_descr spufs_dir_nosched_contents[];
201 201
202/* system call implementation */ 202/* system call implementation */
203extern struct spufs_calls spufs_calls;
203long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); 204long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
204long spufs_create(struct nameidata *nd, unsigned int flags, 205long spufs_create(struct nameidata *nd, unsigned int flags,
205 mode_t mode, struct file *filp); 206 mode_t mode, struct file *filp);
207/* ELF coredump callbacks for writing SPU ELF notes */
208extern int spufs_coredump_extra_notes_size(void);
209extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset);
210
206extern const struct file_operations spufs_context_fops; 211extern const struct file_operations spufs_context_fops;
207 212
208/* gang management */ 213/* gang management */
@@ -295,7 +300,7 @@ struct spufs_coredump_reader {
295 char *name; 300 char *name;
296 ssize_t (*read)(struct spu_context *ctx, 301 ssize_t (*read)(struct spu_context *ctx,
297 char __user *buffer, size_t size, loff_t *pos); 302 char __user *buffer, size_t size, loff_t *pos);
298 u64 (*get)(void *data); 303 u64 (*get)(struct spu_context *ctx);
299 size_t size; 304 size_t size;
300}; 305};
301extern struct spufs_coredump_reader spufs_coredump_read[]; 306extern struct spufs_coredump_reader spufs_coredump_read[];
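A note on the size column in the new spufs_coredump_read[] table: entries backed by a u64 (*get)(struct spu_context *) callback report 19 bytes, the worst-case length of a value printed as "0x%llx\n" ("0x", sixteen hex digits, newline), while the read()-backed entries switch from magic numbers to sizeof()-derived sizes. A sketch of how a note writer could consume the table; spufs_note_size() is a hypothetical helper, not part of this patch:

#include <linux/kernel.h>

static int spufs_note_size(struct spufs_coredump_reader *r,
			   struct spu_context *ctx)
{
	char buf[32];

	if (!r->get)
		return r->size;		/* read()-backed entry: fixed size */

	/* get()-backed entry: formatted as "0x%llx\n", at most 19 bytes */
	return snprintf(buf, sizeof(buf), "0x%llx\n",
			(unsigned long long)r->get(ctx));
}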
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 27ffdae98e5..3d64c81cc6e 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -699,7 +699,7 @@ static inline void get_kernel_slb(u64 ea, u64 slb[2])
699 llp = mmu_psize_defs[mmu_linear_psize].sllp; 699 llp = mmu_psize_defs[mmu_linear_psize].sllp;
700 else 700 else
701 llp = mmu_psize_defs[mmu_virtual_psize].sllp; 701 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
702 slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | 702 slb[0] = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
703 SLB_VSID_KERNEL | llp; 703 SLB_VSID_KERNEL | llp;
704 slb[1] = (ea & ESID_MASK) | SLB_ESID_V; 704 slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
705} 705}
@@ -1559,15 +1559,15 @@ static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
1559 * "wrapped" flag is set, OR in a '1' to 1559 * "wrapped" flag is set, OR in a '1' to
1560 * CSA.SPU_Event_Status[Tm]. 1560 * CSA.SPU_Event_Status[Tm].
1561 */ 1561 */
1562 if (csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) { 1562 if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
1563 csa->spu_chnldata_RW[0] |= 0x20; 1563 return;
1564 } 1564
1565 if ((csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) && 1565 if ((csa->spu_chnlcnt_RW[0] == 0) &&
1566 (csa->spu_chnlcnt_RW[0] == 0 && 1566 (csa->spu_chnldata_RW[1] & 0x20) &&
1567 ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) && 1567 !(csa->spu_chnldata_RW[0] & 0x20))
1568 ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
1569 csa->spu_chnlcnt_RW[0] = 1; 1568 csa->spu_chnlcnt_RW[0] = 1;
1570 } 1569
1570 csa->spu_chnldata_RW[0] |= 0x20;
1571} 1571}
1572 1572
1573static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) 1573static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
@@ -2146,19 +2146,6 @@ int spu_restore(struct spu_state *new, struct spu *spu)
2146} 2146}
2147EXPORT_SYMBOL_GPL(spu_restore); 2147EXPORT_SYMBOL_GPL(spu_restore);
2148 2148
2149/**
2150 * spu_harvest - SPU harvest (reset) operation
2151 * @spu: pointer to SPU iomem structure.
2152 *
2153 * Perform SPU harvest (reset) operation.
2154 */
2155void spu_harvest(struct spu *spu)
2156{
2157 acquire_spu_lock(spu);
2158 harvest(NULL, spu);
2159 release_spu_lock(spu);
2160}
2161
2162static void init_prob(struct spu_state *csa) 2149static void init_prob(struct spu_state *csa)
2163{ 2150{
2164 csa->spu_chnlcnt_RW[9] = 1; 2151 csa->spu_chnlcnt_RW[9] = 1;
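The restore_decr_wrapped() hunk is a logic fix as much as a cleanup: it returns early when the decrementer did not wrap, re-arms channel 0 only when the channel is drained and the wrap event is pending in chnldata[1] but not yet reflected in chnldata[0], and only then ORs in the Tm bit (the old code tested chnldata[2] and checked chnldata[0] after the bit had already been set). The same control flow as a free-standing sketch with illustrative parameter names and types:

#include <linux/types.h>

static void decr_wrap_fixup(int wrapped, unsigned int *chnlcnt0,
			    u64 *chnldata0, u64 chnldata1)
{
	if (!wrapped)
		return;

	/* re-arm the event channel only if it was drained and the wrap
	 * event is pending in data[1] but not yet visible in data[0] */
	if (*chnlcnt0 == 0 && (chnldata1 & 0x20) && !(*chnldata0 & 0x20))
		*chnlcnt0 = 1;

	*chnldata0 |= 0x20;		/* SPU_Event_Status[Tm] */
}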
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 43f0fb88abb..2c34f717019 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -58,26 +58,8 @@ out:
58 return ret; 58 return ret;
59} 59}
60 60
61#ifndef MODULE 61static long do_spu_create(const char __user *pathname, unsigned int flags,
62asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus) 62 mode_t mode, struct file *neighbor)
63{
64 int fput_needed;
65 struct file *filp;
66 long ret;
67
68 ret = -EBADF;
69 filp = fget_light(fd, &fput_needed);
70 if (filp) {
71 ret = do_spu_run(filp, unpc, ustatus);
72 fput_light(filp, fput_needed);
73 }
74
75 return ret;
76}
77#endif
78
79asmlinkage long do_spu_create(const char __user *pathname, unsigned int flags,
80 mode_t mode, struct file *neighbor)
81{ 63{
82 char *tmp; 64 char *tmp;
83 int ret; 65 int ret;
@@ -99,32 +81,10 @@ asmlinkage long do_spu_create(const char __user *pathname, unsigned int flags,
99 return ret; 81 return ret;
100} 82}
101 83
102#ifndef MODULE
103asmlinkage long sys_spu_create(const char __user *pathname, unsigned int flags,
104 mode_t mode, int neighbor_fd)
105{
106 int fput_needed;
107 struct file *neighbor;
108 long ret;
109
110 if (flags & SPU_CREATE_AFFINITY_SPU) {
111 ret = -EBADF;
112 neighbor = fget_light(neighbor_fd, &fput_needed);
113 if (neighbor) {
114 ret = do_spu_create(pathname, flags, mode, neighbor);
115 fput_light(neighbor, fput_needed);
116 }
117 }
118 else {
119 ret = do_spu_create(pathname, flags, mode, NULL);
120 }
121
122 return ret;
123}
124#endif
125
126struct spufs_calls spufs_calls = { 84struct spufs_calls spufs_calls = {
127 .create_thread = do_spu_create, 85 .create_thread = do_spu_create,
128 .spu_run = do_spu_run, 86 .spu_run = do_spu_run,
87 .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
88 .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
129 .owner = THIS_MODULE, 89 .owner = THIS_MODULE,
130}; 90};
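With the coredump callbacks added to spufs_calls, built-in kernel code reaches spufs only through this table (registered via register_spu_syscalls()). A common way to call through such an .owner-tagged table without racing module unload is to pin the owner around the call; the sketch below shows that generic pattern with an illustrative table pointer, and is not the actual spu_syscalls.c code:

#include <linux/module.h>
#include <linux/fs.h>

static struct spufs_calls *calls;	/* illustrative; set at registration */

static long call_spu_run(struct file *filp, __u32 __user *unpc,
			 __u32 __user *ustatus)
{
	long ret = -ENOSYS;

	if (calls && try_module_get(calls->owner)) {
		ret = calls->spu_run(filp, unpc, ustatus);
		module_put(calls->owner);
	}
	return ret;
}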