Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig                 14
-rw-r--r--  arch/powerpc/platforms/cell/Makefile                 7
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c          248
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c              71
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.h             203
-rw-r--r--  arch/powerpc/platforms/cell/cbe_thermal.c          226
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c             16
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.h              2
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c       346
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c               1049
-rw-r--r--  arch/powerpc/platforms/cell/iommu.h                 65
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c            101
-rw-r--r--  arch/powerpc/platforms/cell/pmu.c                  429
-rw-r--r--  arch/powerpc/platforms/cell/setup.c                 78
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c             415
-rw-r--r--  arch/powerpc/platforms/cell/spu_coredump.c          81
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.c       428
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.h        26
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile           2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c     31
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c         27
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c       238
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c           536
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c          51
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c           58
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c            149
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h           33
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c          63
28 files changed, 3916 insertions, 1077 deletions
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 3e430b489bb7..06a85b704331 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -20,4 +20,18 @@ config CBE_RAS
 	bool "RAS features for bare metal Cell BE"
 	default y
 
+config CBE_THERM
+	tristate "CBE thermal support"
+	default m
+	depends on CBE_RAS
+
+config CBE_CPUFREQ
+	tristate "CBE frequency scaling"
+	depends on CBE_RAS && CPU_FREQ
+	default m
+	help
+	  This adds the cpufreq driver for Cell BE processors.
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+	  If you don't have such a processor, say N
+
 endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index c89cdd67383b..f90e8337796c 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,11 @@
 obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \
-					cbe_regs.o spider-pic.o pervasive.o
+					cbe_regs.o spider-pic.o \
+					pervasive.o pmu.o io-workarounds.o
 obj-$(CONFIG_CBE_RAS)	+= ras.o
 
+obj-$(CONFIG_CBE_THERM)		+= cbe_thermal.o
+obj-$(CONFIG_CBE_CPUFREQ)	+= cbe_cpufreq.o
+
 ifeq ($(CONFIG_SMP),y)
 obj-$(CONFIG_PPC_CELL_NATIVE)	+= smp.o
 endif
@@ -11,5 +15,6 @@ spufs-modular-$(CONFIG_SPU_FS) += spu_syscalls.o
 spu-priv1-$(CONFIG_PPC_CELL_NATIVE)	+= spu_priv1_mmio.o
 
 obj-$(CONFIG_SPU_BASE)			+= spu_callbacks.o spu_base.o \
+					   spu_coredump.o \
 					   $(spufs-modular-m) \
 					   $(spu-priv1-y) spufs/
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
new file mode 100644
index 000000000000..a3850fd1e94c
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -0,0 +1,248 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+
+#include "cbe_regs.h"
+
+static DEFINE_MUTEX(cbe_switch_mutex);
+
+
+/* the CBE supports an 8 step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+	{1,	0},
+	{2,	0},
+	{3,	0},
+	{4,	0},
+	{5,	0},
+	{6,	0},
+	{8,	0},
+	{10,	0},
+	{0,	CPUFREQ_TABLE_END},
+};
+
+/* to write to MIC register */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+	[0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* more values for the MIC */
+static u64 MIC_Slow_Next_Timer_table[] = {
+	0x0000240000000000ull,
+	0x0000268000000000ull,
+	0x000029C000000000ull,
+	0x00002D0000000000ull,
+	0x0000300000000000ull,
+	0x0000334000000000ull,
+	0x000039C000000000ull,
+	0x00003FC000000000ull,
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int get_pmode(int cpu)
+{
+	int ret;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+
+	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+	ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+	return ret;
+}
+
+static int set_pmode(int cpu, unsigned int pmode)
+{
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+	u64 flags;
+	u64 value;
+
+	local_irq_save(flags);
+
+	mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+	pr_debug("pm register is mapped at %p\n", &pmd_regs->pmcr);
+	pr_debug("mic register is mapped at %p\n", &mic_tm_regs->slow_fast_timer_0);
+
+	out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+	out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+	out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+	out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+	value = in_be64(&pmd_regs->pmcr);
+	/* set bits to zero */
+	value &= 0xFFFFFFFFFFFFFFF8ull;
+	/* set bits to next pmode */
+	value |= pmode;
+
+	out_be64(&pmd_regs->pmcr, value);
+
+	/* wait until new pmode appears in status register */
+	value = in_be64(&pmd_regs->pmsr) & 0x07;
+	while (value != pmode) {
+		cpu_relax();
+		value = in_be64(&pmd_regs->pmsr) & 0x07;
+	}
+
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	u32 *max_freq;
+	int i, cur_pmode;
+	struct device_node *cpu;
+
+	cpu = of_get_cpu_node(policy->cpu, NULL);
+
+	if (!cpu)
+		return -ENODEV;
+
+	pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+	max_freq = (u32 *) get_property(cpu, "clock-frequency", NULL);
+
+	if (!max_freq)
+		return -EINVAL;
+
+	/* we need the freq in kHz */
+	*max_freq /= 1000;
+
+	pr_debug("max clock-frequency is at %u kHz\n", *max_freq);
+	pr_debug("initializing frequency table\n");
+
+	/* initialize frequency table */
+	for (i = 0; cbe_freqs[i].frequency != CPUFREQ_TABLE_END; i++) {
+		cbe_freqs[i].frequency = *max_freq / cbe_freqs[i].index;
+		pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+	}
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	/* if DEBUG is enabled set_pmode() measures the correct latency of a transition */
+	policy->cpuinfo.transition_latency = 25000;
+
+	cur_pmode = get_pmode(policy->cpu);
+	pr_debug("current pmode is at %d\n", cur_pmode);
+
+	policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+	policy->cpus = cpu_sibling_map[policy->cpu];
+#endif
+
+	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
+
+	/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
+	return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	return 0;
+}
+
+static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, cbe_freqs);
+}
+
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq,
+			      unsigned int relation)
+{
+	int rc;
+	struct cpufreq_freqs freqs;
+	int cbe_pmode_new;
+
+	cpufreq_frequency_table_target(policy,
+				       cbe_freqs,
+				       target_freq,
+				       relation,
+				       &cbe_pmode_new);
+
+	freqs.old = policy->cur;
+	freqs.new = cbe_freqs[cbe_pmode_new].frequency;
+	freqs.cpu = policy->cpu;
+
+	mutex_lock(&cbe_switch_mutex);
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+		 policy->cpu,
+		 cbe_freqs[cbe_pmode_new].frequency,
+		 cbe_freqs[cbe_pmode_new].index);
+
+	rc = set_pmode(policy->cpu, cbe_pmode_new);
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	mutex_unlock(&cbe_switch_mutex);
+
+	return rc;
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+	.verify		= cbe_cpufreq_verify,
+	.target		= cbe_cpufreq_target,
+	.init		= cbe_cpufreq_cpu_init,
+	.exit		= cbe_cpufreq_cpu_exit,
+	.name		= "cbe-cpufreq",
+	.owner		= THIS_MODULE,
+	.flags		= CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+	return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
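For reference, the pmode/frequency relationship the driver sets up is simple: each of the eight pmodes runs the core at the machine's maximum clock divided by a fixed divisor (1, 2, 3, 4, 5, 6, 8, 10, mirroring cbe_freqs[].index above). A stand-alone userspace sketch, not part of the patch; the 3.2 GHz figure is an assumed example:

#include <stdio.h>

int main(void)
{
	/* divisors taken from cbe_freqs[].index, pmodes 0..7 */
	static const unsigned int divisor[] = { 1, 2, 3, 4, 5, 6, 8, 10 };
	unsigned int max_freq = 3200000;	/* assumed 3.2 GHz part, in kHz */
	int pmode;

	for (pmode = 0; pmode < 8; pmode++)
		printf("pmode %d: %u kHz\n", pmode, max_freq / divisor[pmode]);
	return 0;
}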
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 2f194ba29899..9a0ee62691d5 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -8,6 +8,7 @@
 
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <linux/module.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -16,8 +17,6 @@
 
 #include "cbe_regs.h"
 
-#define MAX_CBE 2
-
 /*
  * Current implementation uses "cpu" nodes. We build our own mapping
  * array of cpu numbers to cpu nodes locally for now to allow interrupt
@@ -30,6 +29,8 @@ static struct cbe_regs_map
 	struct device_node *cpu_node;
 	struct cbe_pmd_regs __iomem *pmd_regs;
 	struct cbe_iic_regs __iomem *iic_regs;
+	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+	struct cbe_pmd_shadow_regs pmd_shadow_regs;
 } cbe_regs_maps[MAX_CBE];
 static int cbe_regs_map_count;
 
@@ -42,6 +43,19 @@ static struct cbe_thread_map
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
 	int i;
+	struct device_node *tmp_np;
+
+	if (strcasecmp(np->type, "spe") == 0) {
+		if (np->data == NULL) {
+			/* walk up the path until a cpu node is found */
+			tmp_np = np->parent;
+			while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
+				tmp_np = tmp_np->parent;
+
+			np->data = cbe_find_map(tmp_np);
+		}
+		return np->data;
+	}
 
 	for (i = 0; i < cbe_regs_map_count; i++)
 		if (cbe_regs_maps[i].cpu_node == np)
@@ -56,6 +70,7 @@ struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
 		return NULL;
 	return map->pmd_regs;
 }
+EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
 
 struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
 {
@@ -64,7 +79,23 @@ struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
 		return NULL;
 	return map->pmd_regs;
 }
+EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
 
+struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
+{
+	struct cbe_regs_map *map = cbe_find_map(np);
+	if (map == NULL)
+		return NULL;
+	return &map->pmd_shadow_regs;
+}
+
+struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
+{
+	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+	if (map == NULL)
+		return NULL;
+	return &map->pmd_shadow_regs;
+}
 
 struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
 {
@@ -73,6 +104,7 @@ struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
 		return NULL;
 	return map->iic_regs;
 }
+
 struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
 {
 	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
@@ -81,6 +113,36 @@ struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
 	return map->iic_regs;
 }
 
+struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
+{
+	struct cbe_regs_map *map = cbe_find_map(np);
+	if (map == NULL)
+		return NULL;
+	return map->mic_tm_regs;
+}
+
+struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
+{
+	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+	if (map == NULL)
+		return NULL;
+	return map->mic_tm_regs;
+}
+EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
+
+/* FIXME
+ * This is little more than a stub at the moment. It should be
+ * fleshed out so that it works for both SMT and non-SMT, no
+ * matter if the passed cpu is odd or even.
+ * For SMT enabled, returns 0 for even-numbered cpus; otherwise 1.
+ * For SMT disabled, returns 0 for all cpus.
+ */
+u32 cbe_get_hw_thread_id(int cpu)
+{
+	return (cpu & 1);
+}
+EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
+
 void __init cbe_regs_init(void)
 {
 	int i;
@@ -119,6 +181,11 @@ void __init cbe_regs_init(void)
 		prop = get_property(cpu, "iic", NULL);
 		if (prop != NULL)
 			map->iic_regs = ioremap(prop->address, prop->len);
+
+		prop = (struct address_prop *)get_property(cpu, "mic-tm",
+							   NULL);
+		if (prop != NULL)
+			map->mic_tm_regs = ioremap(prop->address, prop->len);
 	}
 }
 
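The new SPE branch in cbe_find_map() above combines two ideas: walk up the device-tree parents until a node of type "cpu" is found, then memoize the resolved map in np->data so later lookups are constant time. A minimal stand-alone sketch of the same pattern, not part of the patch; the node layout and resolve_cpu_map() are invented for illustration:

#include <stddef.h>
#include <string.h>

struct node {
	struct node *parent;
	const char *type;
	void *data;			/* cached lookup result */
};

/* stand-in for resolving a cpu node to its register map */
static void *resolve_cpu_map(struct node *cpu)
{
	return cpu;			/* illustration only */
}

static void *find_map_cached(struct node *np)
{
	struct node *tmp = np;

	if (np->data == NULL) {
		/* walk up until a "cpu" node is found, then cache it */
		while (tmp != NULL && strcmp(tmp->type, "cpu") != 0)
			tmp = tmp->parent;
		np->data = tmp ? resolve_cpu_map(tmp) : NULL;
	}
	return np->data;
}

int main(void)
{
	struct node cpu = { NULL, "cpu", NULL };
	struct node spe = { &cpu, "spe", NULL };

	return find_map_cached(&spe) == &cpu ? 0 : 1;
}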
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
index e76e4a6af5bc..440a7ecc66ea 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.h
+++ b/arch/powerpc/platforms/cell/cbe_regs.h
@@ -4,12 +4,19 @@
  * This file is intended to hold the various register definitions for CBE
  * on-chip system devices (memory controller, IO controller, etc...)
  *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ *          David J. Erb (djerb@us.ibm.com)
+ *
  * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
  */
 
 #ifndef CBE_REGS_H
 #define CBE_REGS_H
 
+#include <asm/cell-pmu.h>
+
 /*
  *
  * Some HID register definitions
@@ -22,6 +29,7 @@
 #define HID0_CBE_THERM_INT_EN	0x0000000400000000ul
 #define HID0_CBE_SYSERR_INT_EN	0x0000000200000000ul
 
+#define MAX_CBE		2
 
 /*
  *
@@ -29,51 +37,124 @@
  *
  */
 
+union spe_reg {
+	u64 val;
+	u8 spe[8];
+};
+
+union ppe_spe_reg {
+	u64 val;
+	struct {
+		u32 ppe;
+		u32 spe;
+	};
+};
+
+
 struct cbe_pmd_regs {
-	u8 pad_0x0000_0x0800[0x0800 - 0x0000];			/* 0x0000 */
+	/* Debug Bus Control */
+	u64	pad_0x0000;					/* 0x0000 */
+
+	u64	group_control;					/* 0x0008 */
+
+	u8	pad_0x0010_0x00a8 [0x00a8 - 0x0010];		/* 0x0010 */
+
+	u64	debug_bus_control;				/* 0x00a8 */
+
+	u8	pad_0x00b0_0x0100 [0x0100 - 0x00b0];		/* 0x00b0 */
+
+	u64	trace_aux_data;					/* 0x0100 */
+	u64	trace_buffer_0_63;				/* 0x0108 */
+	u64	trace_buffer_64_127;				/* 0x0110 */
+	u64	trace_address;					/* 0x0118 */
+	u64	ext_tr_timer;					/* 0x0120 */
+
+	u8	pad_0x0128_0x0400 [0x0400 - 0x0128];		/* 0x0128 */
+
+	/* Performance Monitor */
+	u64	pm_status;					/* 0x0400 */
+	u64	pm_control;					/* 0x0408 */
+	u64	pm_interval;					/* 0x0410 */
+	u64	pm_ctr[4];					/* 0x0418 */
+	u64	pm_start_stop;					/* 0x0438 */
+	u64	pm07_control[8];				/* 0x0440 */
+
+	u8	pad_0x0480_0x0800 [0x0800 - 0x0480];		/* 0x0480 */
 
 	/* Thermal Sensor Registers */
-	u64	ts_ctsr1;					/* 0x0800 */
+	union	spe_reg	ts_ctsr1;				/* 0x0800 */
 	u64	ts_ctsr2;					/* 0x0808 */
-	u64	ts_mtsr1;					/* 0x0810 */
+	union	spe_reg	ts_mtsr1;				/* 0x0810 */
 	u64	ts_mtsr2;					/* 0x0818 */
-	u64	ts_itr1;					/* 0x0820 */
+	union	spe_reg	ts_itr1;				/* 0x0820 */
 	u64	ts_itr2;					/* 0x0828 */
 	u64	ts_gitr;					/* 0x0830 */
 	u64	ts_isr;						/* 0x0838 */
 	u64	ts_imr;						/* 0x0840 */
-	u64	tm_cr1;						/* 0x0848 */
+	union	spe_reg	tm_cr1;					/* 0x0848 */
 	u64	tm_cr2;						/* 0x0850 */
 	u64	tm_simr;					/* 0x0858 */
-	u64	tm_tpr;						/* 0x0860 */
+	union	ppe_spe_reg tm_tpr;				/* 0x0860 */
-	u64	tm_str1;					/* 0x0868 */
+	union	spe_reg	tm_str1;				/* 0x0868 */
 	u64	tm_str2;					/* 0x0870 */
-	u64	tm_tsr;						/* 0x0878 */
+	union	ppe_spe_reg tm_tsr;				/* 0x0878 */
 
 	/* Power Management */
-	u64	pm_control;					/* 0x0880 */
+	u64	pmcr;						/* 0x0880 */
 #define CBE_PMD_PAUSE_ZERO_CONTROL	0x10000
-	u64	pm_status;					/* 0x0888 */
+	u64	pmsr;						/* 0x0888 */
 
 	/* Time Base Register */
 	u64	tbr;						/* 0x0890 */
 
 	u8	pad_0x0898_0x0c00 [0x0c00 - 0x0898];		/* 0x0898 */
 
 	/* Fault Isolation Registers */
 	u64	checkstop_fir;					/* 0x0c00 */
-	u64	recoverable_fir;
-	u64	spec_att_mchk_fir;
-	u64	fir_mode_reg;
-	u64	fir_enable_mask;
+	u64	recoverable_fir;				/* 0x0c08 */
+	u64	spec_att_mchk_fir;				/* 0x0c10 */
+	u64	fir_mode_reg;					/* 0x0c18 */
+	u64	fir_enable_mask;				/* 0x0c20 */
 
 	u8	pad_0x0c28_0x1000 [0x1000 - 0x0c28];		/* 0x0c28 */
 };
 
 extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
 extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
 
 /*
+ * PMU shadow registers
+ *
+ * Many of the registers in the performance monitoring unit are write-only,
+ * so we need to save a copy of what we write to those registers.
+ *
+ * The actual data counters are read/write. However, writing to the counters
+ * only takes effect if the PMU is enabled. Otherwise the value is stored in
+ * a hardware latch until the next time the PMU is enabled. So we save a copy
+ * of the counter values if we need to read them back while the PMU is
+ * disabled. The counter_value_in_latch field is a bitmap indicating which
+ * counters currently have a value waiting to be written.
+ */
+
+struct cbe_pmd_shadow_regs {
+	u32 group_control;
+	u32 debug_bus_control;
+	u32 trace_address;
+	u32 ext_tr_timer;
+	u32 pm_status;
+	u32 pm_control;
+	u32 pm_interval;
+	u32 pm_start_stop;
+	u32 pm07_control[NR_CTRS];
+
+	u32 pm_ctr[NR_PHYS_CTRS];
+	u32 counter_value_in_latch;
+};
+
+extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
+extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
+
+/*
  *
  * IIC unit register definitions
  *
@@ -102,18 +183,28 @@ struct cbe_iic_regs {
 
 	/* IIC interrupt registers */
 	struct	cbe_iic_thread_regs thread[2];			/* 0x0400 */
-	u64	iic_ir;						/* 0x0440 */
-	u64	iic_is;						/* 0x0448 */
+
+	u64	iic_ir;						/* 0x0440 */
+#define CBE_IIC_IR_PRIO(x)	(((x) & 0xf) << 12)
+#define CBE_IIC_IR_DEST_NODE(x)	(((x) & 0xf) << 4)
+#define CBE_IIC_IR_DEST_UNIT(x)	((x) & 0xf)
+#define CBE_IIC_IR_IOC_0	0x0
+#define CBE_IIC_IR_IOC_1S	0xb
+#define CBE_IIC_IR_PT_0		0xe
+#define CBE_IIC_IR_PT_1		0xf
+
+	u64	iic_is;						/* 0x0448 */
+#define CBE_IIC_IS_PMI		0x2
 
 	u8	pad_0x0450_0x0500[0x0500 - 0x0450];		/* 0x0450 */
 
 	/* IOC FIR */
 	u64	ioc_fir_reset;					/* 0x0500 */
-	u64	ioc_fir_set;
-	u64	ioc_checkstop_enable;
-	u64	ioc_fir_error_mask;
-	u64	ioc_syserr_enable;
-	u64	ioc_fir;
+	u64	ioc_fir_set;					/* 0x0508 */
+	u64	ioc_checkstop_enable;				/* 0x0510 */
+	u64	ioc_fir_error_mask;				/* 0x0518 */
+	u64	ioc_syserr_enable;				/* 0x0520 */
+	u64	ioc_fir;					/* 0x0528 */
 
 	u8	pad_0x0530_0x1000[0x1000 - 0x0530];		/* 0x0530 */
 };
@@ -122,6 +213,48 @@ extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
 extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
 
 
+struct cbe_mic_tm_regs {
+	u8	pad_0x0000_0x0040[0x0040 - 0x0000];		/* 0x0000 */
+
+	u64	mic_ctl_cnfg2;					/* 0x0040 */
+#define CBE_MIC_ENABLE_AUX_TRC		0x8000000000000000LL
+#define CBE_MIC_DISABLE_PWR_SAV_2	0x0200000000000000LL
+#define CBE_MIC_DISABLE_AUX_TRC_WRAP	0x0100000000000000LL
+#define CBE_MIC_ENABLE_AUX_TRC_INT	0x0080000000000000LL
+
+	u64	pad_0x0048;					/* 0x0048 */
+
+	u64	mic_aux_trc_base;				/* 0x0050 */
+	u64	mic_aux_trc_max_addr;				/* 0x0058 */
+	u64	mic_aux_trc_cur_addr;				/* 0x0060 */
+	u64	mic_aux_trc_grf_addr;				/* 0x0068 */
+	u64	mic_aux_trc_grf_data;				/* 0x0070 */
+
+	u64	pad_0x0078;					/* 0x0078 */
+
+	u64	mic_ctl_cnfg_0;					/* 0x0080 */
+#define CBE_MIC_DISABLE_PWR_SAV_0	0x8000000000000000LL
+
+	u64	pad_0x0088;					/* 0x0088 */
+
+	u64	slow_fast_timer_0;				/* 0x0090 */
+	u64	slow_next_timer_0;				/* 0x0098 */
+
+	u8	pad_0x00a0_0x01c0[0x01c0 - 0x0a0];		/* 0x00a0 */
+
+	u64	mic_ctl_cnfg_1;					/* 0x01c0 */
+#define CBE_MIC_DISABLE_PWR_SAV_1	0x8000000000000000LL
+	u64	pad_0x01c8;					/* 0x01c8 */
+
+	u64	slow_fast_timer_1;				/* 0x01d0 */
+	u64	slow_next_timer_1;				/* 0x01d8 */
+
+	u8	pad_0x01e0_0x1000[0x1000 - 0x01e0];		/* 0x01e0 */
+};
+
+extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
+extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
 /* Init this module early */
 extern void cbe_regs_init(void);
 
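The spe_reg union introduced above is how per-SPE fields are accessed: one 64-bit register carries one byte per SPE, so a single in_be64() read yields all eight values, indexable by SPE id. A stand-alone sketch, not part of the patch; note the indexing assumes the CBE's big-endian layout, so on a little-endian host the bytes come out in the opposite order:

#include <stdint.h>
#include <stdio.h>

union spe_reg_sketch {
	uint64_t val;
	uint8_t spe[8];		/* one byte-sized field per SPE */
};

int main(void)
{
	union spe_reg_sketch r;
	int i;

	r.val = 0x1122334455667788ull;	/* pretend register contents */
	for (i = 0; i < 8; i++)
		printf("field %d: 0x%02x\n", i, r.spe[i]);
	return 0;
}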
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
new file mode 100644
index 000000000000..616a0a3fd0e2
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -0,0 +1,226 @@
+/*
+ * thermal support for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <asm/spu.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+
+#include "cbe_regs.h"
+#include "spu_priv1_mmio.h"
+
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
+{
+	struct spu *spu;
+
+	spu = container_of(sysdev, struct spu, sysdev);
+
+	return cbe_get_pmd_regs(spu_devnode(spu));
+}
+
+/* returns the value for a given spu in a given register */
+static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
+{
+	unsigned int *id;
+	union spe_reg value;
+	struct spu *spu;
+
+	/* getting the id from the reg attribute will not work on future device-tree
+	 * layouts; in the future we should store the id in the spu struct and use it here */
+	spu = container_of(sysdev, struct spu, sysdev);
+	id = (unsigned int *)get_property(spu_devnode(spu), "reg", NULL);
+	value.val = in_be64(&reg->val);
+
+	return value.spe[*id];
+}
+
+static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf)
+{
+	int value;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+
+	pmd_regs = get_pmd_regs(sysdev);
+
+	value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
+	/* clear all other bits */
+	value &= 0x3F;
+	/* temp is stored in steps of 2 degrees */
+	value *= 2;
+	/* base temp is 65 degrees */
+	value += 65;
+
+	return sprintf(buf, "%d\n", (int) value);
+}
+
+static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
+{
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	u64 value;
+
+	pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+	value = in_be64(&pmd_regs->ts_ctsr2);
+
+	/* access the corresponding byte */
+	value >>= pos;
+	/* clear all other bits */
+	value &= 0x3F;
+	/* temp is stored in steps of 2 degrees */
+	value *= 2;
+	/* base temp is 65 degrees */
+	value += 65;
+
+	return sprintf(buf, "%d\n", (int) value);
+}
+
+
+/* shows the temperature of the DTS on the PPE,
+ * located near the linear thermal sensor */
+static ssize_t ppe_show_temp0(struct sys_device *sysdev, char *buf)
+{
+	return ppe_show_temp(sysdev, buf, 32);
+}
+
+/* shows the temperature of the second DTS on the PPE */
+static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf)
+{
+	return ppe_show_temp(sysdev, buf, 0);
+}
+
+static struct sysdev_attribute attr_spu_temperature = {
+	.attr = {.name = "temperature", .mode = 0400 },
+	.show = spu_show_temp,
+};
+
+static struct attribute *spu_attributes[] = {
+	&attr_spu_temperature.attr,
+};
+
+static struct attribute_group spu_attribute_group = {
+	.name	= "thermal",
+	.attrs	= spu_attributes,
+};
+
+static struct sysdev_attribute attr_ppe_temperature0 = {
+	.attr = {.name = "temperature0", .mode = 0400 },
+	.show = ppe_show_temp0,
+};
+
+static struct sysdev_attribute attr_ppe_temperature1 = {
+	.attr = {.name = "temperature1", .mode = 0400 },
+	.show = ppe_show_temp1,
+};
+
+static struct attribute *ppe_attributes[] = {
+	&attr_ppe_temperature0.attr,
+	&attr_ppe_temperature1.attr,
+};
+
+static struct attribute_group ppe_attribute_group = {
+	.name	= "thermal",
+	.attrs	= ppe_attributes,
+};
+
+/*
+ * initialize throttling with default values
+ */
+static void __init init_default_values(void)
+{
+	int cpu;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	struct sys_device *sysdev;
+	union ppe_spe_reg tpr;
+	union spe_reg str1;
+	u64 str2;
+	union spe_reg cr1;
+	u64 cr2;
+
+	/* TPR defaults */
+	/* ppe
+	 *	1F - no full stop
+	 *	08 - dynamic throttling starts if over 80 degrees
+	 *	03 - dynamic throttling ceases if below 70 degrees */
+	tpr.ppe = 0x1F0803;
+	/* spe
+	 *	10 - full stop when over 96 degrees
+	 *	08 - dynamic throttling starts if over 80 degrees
+	 *	03 - dynamic throttling ceases if below 70 degrees
+	 */
+	tpr.spe = 0x100803;
+
+	/* STR defaults */
+	/* str1
+	 *	10 - stop 16 of 32 cycles
+	 */
+	str1.val = 0x1010101010101010ull;
+	/* str2
+	 *	10 - stop 16 of 32 cycles
+	 */
+	str2 = 0x10;
+
+	/* CR defaults */
+	/* cr1
+	 *	4 - normal operation
+	 */
+	cr1.val = 0x0404040404040404ull;
+	/* cr2
+	 *	4 - normal operation
+	 */
+	cr2 = 0x04;
+
+	for_each_possible_cpu (cpu) {
+		pr_debug("processing cpu %d\n", cpu);
+		sysdev = get_cpu_sysdev(cpu);
+		pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+
+		out_be64(&pmd_regs->tm_str2, str2);
+		out_be64(&pmd_regs->tm_str1.val, str1.val);
+		out_be64(&pmd_regs->tm_tpr.val, tpr.val);
+		out_be64(&pmd_regs->tm_cr1.val, cr1.val);
+		out_be64(&pmd_regs->tm_cr2, cr2);
+	}
+}
+
+
+static int __init thermal_init(void)
+{
+	init_default_values();
+
+	spu_add_sysdev_attr_group(&spu_attribute_group);
+	cpu_add_sysdev_attr_group(&ppe_attribute_group);
+
+	return 0;
+}
+module_init(thermal_init);
+
+static void __exit thermal_exit(void)
+{
+	spu_remove_sysdev_attr_group(&spu_attribute_group);
+	cpu_remove_sysdev_attr_group(&ppe_attribute_group);
+}
+module_exit(thermal_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+
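Both temperature paths above decode a sensor reading the same way: keep the low six bits, scale by the 2-degree step, add the 65-degree base, i.e. temp = (raw & 0x3F) * 2 + 65. A stand-alone sketch of just that conversion, not part of the patch:

#include <stdio.h>

static int decode_cbe_temp(unsigned long reg_field)
{
	/* low 6 bits, in steps of 2 degrees, from a 65-degree base */
	return (int)(reg_field & 0x3F) * 2 + 65;
}

int main(void)
{
	/* raw value 0x10 (16 steps) -> 16 * 2 + 65 = 97 degrees C */
	printf("%d\n", decode_cbe_temp(0x10));
	return 0;
}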
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a914c12b4060..6666d037eb44 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -396,3 +396,19 @@ void __init iic_init_IRQ(void)
 	/* Enable on current CPU */
 	iic_setup_cpu();
 }
+
+void iic_set_interrupt_routing(int cpu, int thread, int priority)
+{
+	struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
+	u64 iic_ir = 0;
+	int node = cpu >> 1;
+
+	/* Set which node and thread will handle the next interrupt */
+	iic_ir |= CBE_IIC_IR_PRIO(priority) |
+		  CBE_IIC_IR_DEST_NODE(node);
+	if (thread == 0)
+		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
+	else
+		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
+	out_be64(&iic_regs->iic_ir, iic_ir);
+}
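The IIC_IR layout used by iic_set_interrupt_routing() packs three fields: priority at bit 12, destination node at bit 4, and destination unit in the low nibble (with CBE_IIC_IR_PT_0/PT_1 selecting the hardware thread). A stand-alone sketch of the same bit packing, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define IR_PRIO(x)	(((uint64_t)(x) & 0xf) << 12)
#define IR_DEST_NODE(x)	(((uint64_t)(x) & 0xf) << 4)
#define IR_DEST_UNIT(x)	((uint64_t)(x) & 0xf)

int main(void)
{
	/* route to node 1, hardware thread 1 (unit 0xf), priority 0 */
	uint64_t ir = IR_PRIO(0) | IR_DEST_NODE(1) | IR_DEST_UNIT(0xf);

	printf("iic_ir = 0x%llx\n", (unsigned long long)ir);
	return 0;
}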
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 9ba1d3c17b4b..942dc39d6045 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -83,5 +83,7 @@ extern u8 iic_get_target_id(int cpu);
 
 extern void spider_init_IRQ(void);
 
+extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
+
 #endif
 #endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
new file mode 100644
index 000000000000..580d42595912
--- /dev/null
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *		      IBM, Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+
+#define SPIDER_PCI_REG_BASE		0xd000
+#define SPIDER_PCI_VCI_CNTL_STAT	0x0110
+#define SPIDER_PCI_DUMMY_READ		0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE	0x0814
+
+/* Undefine that to re-enable bogus prefetch
+ *
+ * Without that workaround, the chip will do bogus prefetch past
+ * page boundary from system memory. This setting will disable that,
+ * though the documentation is unclear as to the consequences of doing
+ * so, either purely performance, or possible misbehaviour... It's not
+ * clear whether the chip can handle unaligned accesses at all without
+ * prefetching enabled.
+ *
+ * For now, things appear to be behaving properly with that prefetching
+ * disabled and IDE, possibly because IDE isn't doing any unaligned
+ * access.
+ */
+#define SPIDER_DISABLE_PREFETCH
+
+#define MAX_SPIDERS	2
+
+static struct spider_pci_bus {
+	void __iomem	*regs;
+	unsigned long	mmio_start;
+	unsigned long	mmio_end;
+	unsigned long	pio_vstart;
+	unsigned long	pio_vend;
+} spider_pci_busses[MAX_SPIDERS];
+static int spider_pci_count;
+
+static struct spider_pci_bus *spider_pci_find(unsigned long vaddr,
+					      unsigned long paddr)
+{
+	int i;
+
+	for (i = 0; i < spider_pci_count; i++) {
+		struct spider_pci_bus *bus = &spider_pci_busses[i];
+		if (paddr && paddr >= bus->mmio_start && paddr < bus->mmio_end)
+			return bus;
+		if (vaddr && vaddr >= bus->pio_vstart && vaddr < bus->pio_vend)
+			return bus;
+	}
+	return NULL;
+}
+
+static void spider_io_flush(const volatile void __iomem *addr)
+{
+	struct spider_pci_bus *bus;
+	int token;
+
+	/* Get platform token (set by ioremap) from address */
+	token = PCI_GET_ADDR_TOKEN(addr);
+
+	/* Fast path if we have a non-0 token, it indicates which bus we
+	 * are on.
+	 *
+	 * If the token is 0, that means either the ioremap was done
+	 * before we initialized this layer, or it's a PIO operation. We
+	 * fall back to a low path in this case. Hopefully, internal devices
+	 * which are ioremap'ed early should use in_XX/out_XX functions
+	 * instead of the PCI ones and thus not suffer from the slowdown.
+	 *
+	 * Also note that currently, the workaround will not work for areas
+	 * that are not mapped with PTEs (bolted in the hash table). This
+	 * is the case for ioremaps done very early at boot (before
+	 * mem_init_done) and includes the mapping of the ISA IO space.
+	 *
+	 * Fortunately, none of the affected devices is expected to do DMA
+	 * and thus there should be no problem in practice.
+	 *
+	 * In order to improve performance, we only do the PTE search for
+	 * addresses falling in the PHB IO space area. That means it will
+	 * not work for hotplug'ed PHBs but those don't exist with Spider.
+	 */
+	if (token && token <= spider_pci_count)
+		bus = &spider_pci_busses[token - 1];
+	else {
+		unsigned long vaddr, paddr;
+		pte_t *ptep;
+
+		/* Fixup physical address */
+		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
+
+		/* Check if it's in allowed range for PIO */
+		if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE)
+			return;
+
+		/* Try to find a PTE. If not, clear the paddr, we'll do
+		 * a vaddr only lookup (PIO only)
+		 */
+		ptep = find_linux_pte(init_mm.pgd, vaddr);
+		if (ptep == NULL)
+			paddr = 0;
+		else
+			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+
+		bus = spider_pci_find(vaddr, paddr);
+		if (bus == NULL)
+			return;
+	}
+
+	/* Now do the workaround
+	 */
+	(void)in_be32(bus->regs + SPIDER_PCI_DUMMY_READ);
+}
+
+static u8 spider_readb(const volatile void __iomem *addr)
+{
+	u8 val = __do_readb(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u16 spider_readw(const volatile void __iomem *addr)
+{
+	u16 val = __do_readw(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u32 spider_readl(const volatile void __iomem *addr)
+{
+	u32 val = __do_readl(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u64 spider_readq(const volatile void __iomem *addr)
+{
+	u64 val = __do_readq(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u16 spider_readw_be(const volatile void __iomem *addr)
+{
+	u16 val = __do_readw_be(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u32 spider_readl_be(const volatile void __iomem *addr)
+{
+	u32 val = __do_readl_be(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static u64 spider_readq_be(const volatile void __iomem *addr)
+{
+	u64 val = __do_readq_be(addr);
+	spider_io_flush(addr);
+	return val;
+}
+
+static void spider_readsb(const volatile void __iomem *addr, void *buf,
+			  unsigned long count)
+{
+	__do_readsb(addr, buf, count);
+	spider_io_flush(addr);
+}
+
+static void spider_readsw(const volatile void __iomem *addr, void *buf,
+			  unsigned long count)
+{
+	__do_readsw(addr, buf, count);
+	spider_io_flush(addr);
+}
+
+static void spider_readsl(const volatile void __iomem *addr, void *buf,
+			  unsigned long count)
+{
+	__do_readsl(addr, buf, count);
+	spider_io_flush(addr);
+}
+
+static void spider_memcpy_fromio(void *dest, const volatile void __iomem *src,
+				 unsigned long n)
+{
+	__do_memcpy_fromio(dest, src, n);
+	spider_io_flush(src);
+}
+
+
+static void __iomem * spider_ioremap(unsigned long addr, unsigned long size,
+				     unsigned long flags)
+{
+	struct spider_pci_bus *bus;
+	void __iomem *res = __ioremap(addr, size, flags);
+	int busno;
+
+	pr_debug("spider_ioremap(0x%lx, 0x%lx, 0x%lx) -> 0x%p\n",
+		 addr, size, flags, res);
+
+	bus = spider_pci_find(0, addr);
+	if (bus != NULL) {
+		busno = bus - spider_pci_busses;
+		pr_debug(" found bus %d, setting token\n", busno);
+		PCI_SET_ADDR_TOKEN(res, busno + 1);
+	}
+	pr_debug(" result=0x%p\n", res);
+
+	return res;
+}
+
+static void __init spider_pci_setup_chip(struct spider_pci_bus *bus)
+{
+#ifdef SPIDER_DISABLE_PREFETCH
+	u32 val = in_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT);
+	pr_debug(" PVCI_Control_Status was 0x%08x\n", val);
+	out_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
+#endif
+
+	/* Configure the dummy address for the workaround */
+	out_be32(bus->regs + SPIDER_PCI_DUMMY_READ_BASE, 0x80000000);
+}
+
+static void __init spider_pci_add_one(struct pci_controller *phb)
+{
+	struct spider_pci_bus *bus = &spider_pci_busses[spider_pci_count];
+	struct device_node *np = phb->arch_data;
+	struct resource rsrc;
+	void __iomem *regs;
+
+	if (spider_pci_count >= MAX_SPIDERS) {
+		printk(KERN_ERR "Too many spider bridges, workarounds"
+		       " disabled for %s\n", np->full_name);
+		return;
+	}
+
+	/* Get the registers for the beast */
+	if (of_address_to_resource(np, 0, &rsrc)) {
+		printk(KERN_ERR "Failed to get registers for spider %s"
+		       " workarounds disabled\n", np->full_name);
+		return;
+	}
+
+	/* Mask out some useless bits in there to get to the base of the
+	 * spider chip
+	 */
+	rsrc.start &= ~0xfffffffful;
+
+	/* Map them */
+	regs = ioremap(rsrc.start + SPIDER_PCI_REG_BASE, 0x1000);
+	if (regs == NULL) {
+		printk(KERN_ERR "Failed to map registers for spider %s"
+		       " workarounds disabled\n", np->full_name);
+		return;
+	}
+
+	spider_pci_count++;
+
+	/* We assume spiders only have one MMIO resource */
+	bus->mmio_start = phb->mem_resources[0].start;
+	bus->mmio_end = phb->mem_resources[0].end + 1;
+
+	bus->pio_vstart = (unsigned long)phb->io_base_virt;
+	bus->pio_vend = bus->pio_vstart + phb->pci_io_size;
+
+	bus->regs = regs;
+
+	printk(KERN_INFO "PCI: Spider MMIO workaround for %s\n", np->full_name);
+
+	pr_debug(" mmio (P) = 0x%016lx..0x%016lx\n",
+		 bus->mmio_start, bus->mmio_end);
+	pr_debug("  pio (V) = 0x%016lx..0x%016lx\n",
+		 bus->pio_vstart, bus->pio_vend);
+	pr_debug(" regs (P) = 0x%016lx (V) = 0x%p\n",
+		 rsrc.start + SPIDER_PCI_REG_BASE, bus->regs);
+
+	spider_pci_setup_chip(bus);
+}
+
+static struct ppc_pci_io __initdata spider_pci_io = {
+	.readb = spider_readb,
+	.readw = spider_readw,
+	.readl = spider_readl,
+	.readq = spider_readq,
+	.readw_be = spider_readw_be,
+	.readl_be = spider_readl_be,
+	.readq_be = spider_readq_be,
+	.readsb = spider_readsb,
+	.readsw = spider_readsw,
+	.readsl = spider_readsl,
+	.memcpy_fromio = spider_memcpy_fromio,
+};
+
+static int __init spider_pci_workaround_init(void)
+{
+	struct pci_controller *phb;
+
+	if (!machine_is(cell))
+		return 0;
+
+	/* Find spider bridges. We assume they have been all probed
+	 * in setup_arch(). If that was to change, we would need to
+	 * update this code to cope with dynamically added busses
+	 */
+	list_for_each_entry(phb, &hose_list, list_node) {
+		struct device_node *np = phb->arch_data;
+		const char *model = get_property(np, "model", NULL);
+
+		/* If no model property or name isn't exactly "pci", skip */
+		if (model == NULL || strcmp(np->name, "pci"))
+			continue;
+		/* If model is not "Spider", skip */
+		if (strcmp(model, "Spider"))
+			continue;
+		spider_pci_add_one(phb);
+	}
+
+	/* No Spider PCI found, exit */
+	if (spider_pci_count == 0)
+		return 0;
+
+	/* Setup IO callbacks. We only setup MMIO reads. PIO reads will
+	 * fall back to MMIO reads (though without a token, thus slower)
+	 */
+	ppc_pci_io = spider_pci_io;
+
+	/* Setup ioremap callback */
+	ppc_md.ioremap = spider_ioremap;
+
+	return 0;
+}
+arch_initcall(spider_pci_workaround_init);
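The read accessors above all follow one pattern: perform the real MMIO read, then read a dummy register on the spider bridge so stale data cannot be returned by a later read. Reduced to its skeleton as a stand-alone sketch with fake registers, not part of the patch:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_device_reg = 0x1234;	/* stands in for a device register */
static volatile uint32_t fake_bridge_dummy_reg;		/* stands in for SPIDER_PCI_DUMMY_READ */

static uint32_t readl_with_flush(const volatile uint32_t *addr)
{
	uint32_t val = *addr;			/* the real device read */
	uint32_t flush = fake_bridge_dummy_reg;	/* flush via dummy bridge read */

	(void)flush;
	return val;
}

int main(void)
{
	printf("0x%x\n", readl_with_flush(&fake_device_reg));
	return 0;
}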
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index aca4c3db0dde..b43466ba8096 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1,514 +1,747 @@
1/* 1/*
2 * IOMMU implementation for Cell Broadband Processor Architecture 2 * IOMMU implementation for Cell Broadband Processor Architecture
3 * We just establish a linear mapping at boot by setting all the
4 * IOPT cache entries in the CPU.
5 * The mapping functions should be identical to pci_direct_iommu,
6 * except for the handling of the high order bit that is required
7 * by the Spider bridge. These should be split into a separate
8 * file at the point where we get a different bridge chip.
9 * 3 *
10 * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH, 4 * (C) Copyright IBM Corporation 2006
11 * Arnd Bergmann <arndb@de.ibm.com>
12 * 5 *
13 * Based on linear mapping 6 * Author: Jeremy Kerr <jk@ozlabs.org>
14 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
15 * 7 *
16 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or modify
17 * modify it under the terms of the GNU General Public License 9 * it under the terms of the GNU General Public License as published by
18 * as published by the Free Software Foundation; either version 10 * the Free Software Foundation; either version 2, or (at your option)
19 * 2 of the License, or (at your option) any later version. 11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 21 */
21 22
22#undef DEBUG 23#undef DEBUG
23 24
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/string.h>
28#include <linux/init.h> 26#include <linux/init.h>
29#include <linux/bootmem.h> 27#include <linux/interrupt.h>
30#include <linux/mm.h> 28#include <linux/notifier.h>
31#include <linux/dma-mapping.h>
32#include <linux/kernel.h>
33#include <linux/compiler.h>
34 29
35#include <asm/sections.h>
36#include <asm/iommu.h>
37#include <asm/io.h>
38#include <asm/prom.h> 30#include <asm/prom.h>
39#include <asm/pci-bridge.h> 31#include <asm/iommu.h>
40#include <asm/machdep.h> 32#include <asm/machdep.h>
41#include <asm/pmac_feature.h> 33#include <asm/pci-bridge.h>
42#include <asm/abs_addr.h>
43#include <asm/system.h>
44#include <asm/ppc-pci.h>
45#include <asm/udbg.h> 34#include <asm/udbg.h>
35#include <asm/of_platform.h>
36#include <asm/lmb.h>
46 37
47#include "iommu.h" 38#include "cbe_regs.h"
39#include "interrupt.h"
48 40
49static inline unsigned long 41/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
50get_iopt_entry(unsigned long real_address, unsigned long ioid, 42 * instead of leaving them mapped to some dummy page. This can be
51 unsigned long prot) 43 * enabled once the appropriate workarounds for spider bugs have
52{ 44 * been enabled
53 return (prot & IOPT_PROT_MASK) 45 */
54 | (IOPT_COHERENT) 46#define CELL_IOMMU_REAL_UNMAP
55 | (IOPT_ORDER_VC)
56 | (real_address & IOPT_RPN_MASK)
57 | (ioid & IOPT_IOID_MASK);
58}
59 47
60typedef struct { 48/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
61 unsigned long val; 49 * IO PTEs based on the transfer direction. That can be enabled
62} ioste; 50 * once spider-net has been fixed to pass the correct direction
51 * to the DMA mapping functions
52 */
53#define CELL_IOMMU_STRICT_PROTECTION
54
55
56#define NR_IOMMUS 2
57
58/* IOC mmap registers */
59#define IOC_Reg_Size 0x2000
60
61#define IOC_IOPT_CacheInvd 0x908
62#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul
63#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul
64#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul
65
66#define IOC_IOST_Origin 0x918
67#define IOC_IOST_Origin_E 0x8000000000000000ul
68#define IOC_IOST_Origin_HW 0x0000000000000800ul
69#define IOC_IOST_Origin_HL 0x0000000000000400ul
70
71#define IOC_IO_ExcpStat 0x920
72#define IOC_IO_ExcpStat_V 0x8000000000000000ul
73#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul
74#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul
75#define IOC_IO_ExcpStat_SPF_P 0x4000000000000000ul
76#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul
77#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul
78#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful
79
80#define IOC_IO_ExcpMask 0x928
81#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul
82#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul
83
84#define IOC_IOCmd_Offset 0x1000
85
86#define IOC_IOCmd_Cfg 0xc00
87#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul
88
89
90/* Segment table entries */
91#define IOSTE_V 0x8000000000000000ul /* valid */
92#define IOSTE_H 0x4000000000000000ul /* cache hint */
93#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */
94#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. pages in IOPT */
95#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */
96#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */
97#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */
98#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
99#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */
100
101/* Page table entries */
102#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
103#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
104#define IOPTE_M 0x2000000000000000ul /* coherency required */
105#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
106#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
107#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
108#define IOPTE_H 0x0000000000000800ul /* cache hint */
109#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
110
111
112/* IOMMU sizing */
113#define IO_SEGMENT_SHIFT 28
114#define IO_PAGENO_BITS (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
115
116/* The high bit needs to be set on every DMA address */
117#define SPIDER_DMA_OFFSET 0x80000000ul
118
119struct iommu_window {
120 struct list_head list;
121 struct cbe_iommu *iommu;
122 unsigned long offset;
123 unsigned long size;
124 unsigned long pte_offset;
125 unsigned int ioid;
126 struct iommu_table table;
127};
63 128
64static inline ioste 129#define NAMESIZE 8
65mk_ioste(unsigned long val) 130struct cbe_iommu {
66{ 131 int nid;
67 ioste ioste = { .val = val, }; 132 char name[NAMESIZE];
68 return ioste; 133 void __iomem *xlate_regs;
69} 134 void __iomem *cmd_regs;
135 unsigned long *stab;
136 unsigned long *ptab;
137 void *pad_page;
138 struct list_head windows;
139};
140
141/* Static array of iommus, one per node
142 * each contains a list of windows, keyed from dma_window property
143 * - on bus setup, look for a matching window, or create one
144 * - on dev setup, assign iommu_table ptr
145 */
146static struct cbe_iommu iommus[NR_IOMMUS];
147static int cbe_nr_iommus;
70 148
71static inline ioste 149static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
72get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size) 150 long n_ptes)
73{ 151{
74 unsigned long ps; 152 unsigned long *reg, val;
75 unsigned long iostep; 153 long n;
76 unsigned long nnpt;
77 unsigned long shift;
78
79 switch (page_size) {
80 case 0x1000000:
81 ps = IOST_PS_16M;
82 nnpt = 0; /* one page per segment */
83 shift = 5; /* segment has 16 iopt entries */
84 break;
85
86 case 0x100000:
87 ps = IOST_PS_1M;
88 nnpt = 0; /* one page per segment */
89 shift = 1; /* segment has 256 iopt entries */
90 break;
91
92 case 0x10000:
93 ps = IOST_PS_64K;
94 nnpt = 0x07; /* 8 pages per io page table */
95 shift = 0; /* all entries are used */
96 break;
97
98 case 0x1000:
99 ps = IOST_PS_4K;
100 nnpt = 0x7f; /* 128 pages per io page table */
101 shift = 0; /* all entries are used */
102 break;
103
104 default: /* not a known compile time constant */
105 {
106 /* BUILD_BUG_ON() is not usable here */
107 extern void __get_iost_entry_bad_page_size(void);
108 __get_iost_entry_bad_page_size();
109 }
110 break;
111 }
112 154
113 iostep = iopt_base + 155 reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
114 /* need 8 bytes per iopte */
115 (((io_address / page_size * 8)
116 /* align io page tables on 4k page boundaries */
117 << shift)
118 /* nnpt+1 pages go into each iopt */
119 & ~(nnpt << 12));
120
121 nnpt++; /* this seems to work, but the documentation is not clear
122 about wether we put nnpt or nnpt-1 into the ioste bits.
123 In theory, this can't work for 4k pages. */
124 return mk_ioste(IOST_VALID_MASK
125 | (iostep & IOST_PT_BASE_MASK)
126 | ((nnpt << 5) & IOST_NNPT_MASK)
127 | (ps & IOST_PS_MASK));
128}
129 156
130/* compute the address of an io pte */ 157 while (n_ptes > 0) {
131static inline unsigned long 158 /* we can invalidate up to 1 << 11 PTEs at once */
132get_ioptep(ioste iost_entry, unsigned long io_address) 159 n = min(n_ptes, 1l << 11);
133{ 160 val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
134 unsigned long iopt_base; 161 | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
135 unsigned long page_size; 162 | IOC_IOPT_CacheInvd_Busy;
136 unsigned long page_number;
137 unsigned long iopt_offset;
138
139 iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
140 page_size = iost_entry.val & IOST_PS_MASK;
141
142 /* decode page size to compute page number */
143 page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
144 /* page number is an offset into the io page table */
145 iopt_offset = (page_number << 3) & 0x7fff8ul;
146 return iopt_base + iopt_offset;
147}
148 163
149/* compute the tag field of the iopt cache entry */ 164 out_be64(reg, val);
150static inline unsigned long 165 while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
151get_ioc_tag(ioste iost_entry, unsigned long io_address) 166 ;
152{
153 unsigned long iopte = get_ioptep(iost_entry, io_address);
154 167
155 return IOPT_VALID_MASK 168 n_ptes -= n;
156 | ((iopte & 0x00000000000000ff8ul) >> 3) 169 pte += n;
157 | ((iopte & 0x0000003fffffc0000ul) >> 9); 170 }
158} 171}
159 172
160/* compute the hashed 6 bit index for the 4-way associative pte cache */ 173static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
161static inline unsigned long 174 unsigned long uaddr, enum dma_data_direction direction)
162get_ioc_hash(ioste iost_entry, unsigned long io_address)
163{ 175{
164 unsigned long iopte = get_ioptep(iost_entry, io_address); 176 int i;
165 177 unsigned long *io_pte, base_pte;
166 return ((iopte & 0x000000000000001f8ul) >> 3) 178 struct iommu_window *window =
167 ^ ((iopte & 0x00000000000020000ul) >> 17) 179 container_of(tbl, struct iommu_window, table);
168 ^ ((iopte & 0x00000000000010000ul) >> 15) 180
169 ^ ((iopte & 0x00000000000008000ul) >> 13) 181 /* implementing proper protection causes problems with the spidernet
170 ^ ((iopte & 0x00000000000004000ul) >> 11) 182 * driver - check mapping directions later, but allow read & write by
171 ^ ((iopte & 0x00000000000002000ul) >> 9) 183 * default for now.*/
172 ^ ((iopte & 0x00000000000001000ul) >> 7); 184#ifdef CELL_IOMMU_STRICT_PROTECTION
185 /* to avoid referencing a global, we use a trick here to setup the
186 * protection bit. "prot" is setup to be 3 fields of 4 bits apprended
187 * together for each of the 3 supported direction values. It is then
188 * shifted left so that the fields matching the desired direction
189 * lands on the appropriate bits, and other bits are masked out.
190 */
191 const unsigned long prot = 0xc48;
192 base_pte =
193 ((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
194 | IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
195#else
196 base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
197 (window->ioid & IOPTE_IOID_Mask);
198#endif
199
200 io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
201
202 for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
203 io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
204
205 mb();
206
207 invalidate_tce_cache(window->iommu, io_pte, npages);
208
209 pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
210 index, npages, direction, base_pte);
173} 211}
174 212
175/* same as above, but pretend that we have a simpler 1-way associative 213static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
176 pte cache with an 8 bit index */
177static inline unsigned long
178get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
179{ 214{
180 unsigned long iopte = get_ioptep(iost_entry, io_address);
181
182 return ((iopte & 0x000000000000001f8ul) >> 3)
183 ^ ((iopte & 0x00000000000020000ul) >> 17)
184 ^ ((iopte & 0x00000000000010000ul) >> 15)
185 ^ ((iopte & 0x00000000000008000ul) >> 13)
186 ^ ((iopte & 0x00000000000004000ul) >> 11)
187 ^ ((iopte & 0x00000000000002000ul) >> 9)
188 ^ ((iopte & 0x00000000000001000ul) >> 7)
189 ^ ((iopte & 0x0000000000000c000ul) >> 8);
190}
191 215
192static inline ioste 216 int i;
193get_iost_cache(void __iomem *base, unsigned long index) 217 unsigned long *io_pte, pte;
194{ 218 struct iommu_window *window =
195 unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); 219 container_of(tbl, struct iommu_window, table);
196 return mk_ioste(in_be64(&p[index]));
197}
198 220
199static inline void 221 pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
200set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
201{
202 unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
203 pr_debug("ioste %02lx was %016lx, store %016lx", index,
204 get_iost_cache(base, index).val, ste.val);
205 out_be64(&p[index], ste.val);
206 pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
207}
208 222
209static inline unsigned long 223#ifdef CELL_IOMMU_REAL_UNMAP
210get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag) 224 pte = 0;
211{ 225#else
212 unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR); 226 /* spider bridge does PCI reads after freeing - insert a mapping
213 unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG); 227 * to a scratch page instead of an invalid entry */
228 pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
229 | (window->ioid & IOPTE_IOID_Mask);
230#endif
214 231
215 *tag = tags[index]; 232 io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
216 rmb();
217 return *p;
218}
219 233
220static inline void 234 for (i = 0; i < npages; i++)
221set_iopt_cache(void __iomem *base, unsigned long index, 235 io_pte[i] = pte;
222 unsigned long tag, unsigned long val) 236
223{ 237 mb();
224 unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
225 unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
226 238
227 out_be64(p, val); 239 invalidate_tce_cache(window->iommu, io_pte, npages);
228 out_be64(&tags[index], tag);
229} 240}
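
The #else branch of tce_free_cell is a defensive pattern worth naming: instead of leaving an invalid entry that would fault when the spider bridge prefetches a just-freed mapping, freed slots are redirected to a harmless scratch page. A hedged sketch of the pattern, with hypothetical names:

#include <stdint.h>

#define PTE_READ_OK 0x1ull	/* placeholder protection bit */

/* on free, point entries at a scratch page instead of zeroing them, so a
 * late read from a misbehaving bus master hits real memory and succeeds */
static void redirect_freed_entries(uint64_t *ptes, long npages,
				   uint64_t scratch_page_phys)
{
	long i;

	for (i = 0; i < npages; i++)
		ptes[i] = PTE_READ_OK | scratch_page_phys;
	/* a write barrier plus a TCE-cache invalidate would follow here */
}
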
230 241
231static inline void 242static irqreturn_t ioc_interrupt(int irq, void *data)
232set_iost_origin(void __iomem *base)
233{ 243{
234 unsigned long __iomem *p = base + IOC_ST_ORIGIN; 244 unsigned long stat;
235 unsigned long origin = IOSTO_ENABLE | IOSTO_SW; 245 struct cbe_iommu *iommu = data;
236 246
237 pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin); 247 stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
238 out_be64(p, origin); 248
249 /* Might want to rate limit it */
250 printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
251 printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
252 !!(stat & IOC_IO_ExcpStat_V),
253 (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
254 (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
255 (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
256 (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
257 printk(KERN_ERR " page=0x%016lx\n",
258 stat & IOC_IO_ExcpStat_ADDR_Mask);
259
260 /* clear interrupt */
261 stat &= ~IOC_IO_ExcpStat_V;
262 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
263
264 return IRQ_HANDLED;
239} 265}
240 266
241static inline void 267static int cell_iommu_find_ioc(int nid, unsigned long *base)
242set_iocmd_config(void __iomem *base)
243{ 268{
244 unsigned long __iomem *p = base + 0xc00; 269 struct device_node *np;
245 unsigned long conf; 270 struct resource r;
271
272 *base = 0;
273
274 /* First look for new style /be nodes */
275 for_each_node_by_name(np, "ioc") {
276 if (of_node_to_nid(np) != nid)
277 continue;
278 if (of_address_to_resource(np, 0, &r)) {
279 printk(KERN_ERR "iommu: can't get address for %s\n",
280 np->full_name);
281 continue;
282 }
283 *base = r.start;
284 of_node_put(np);
285 return 0;
286 }
246 287
247 conf = in_be64(p); 288 /* Ok, let's try the old way */
248 pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE); 289 for_each_node_by_type(np, "cpu") {
249 out_be64(p, conf | IOCMD_CONF_TE); 290 const unsigned int *nidp;
291 const unsigned long *tmp;
292
293 nidp = get_property(np, "node-id", NULL);
294 if (nidp && *nidp == nid) {
295 tmp = get_property(np, "ioc-translation", NULL);
296 if (tmp) {
297 *base = *tmp;
298 of_node_put(np);
299 return 0;
300 }
301 }
302 }
303
304 return -ENODEV;
250} 305}
251 306
252static void enable_mapping(void __iomem *base, void __iomem *mmio_base) 307static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long size)
253{ 308{
254 set_iocmd_config(base); 309 struct page *page;
255 set_iost_origin(mmio_base); 310 int ret, i;
256} 311 unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages;
312 unsigned long xlate_base;
313 unsigned int virq;
314
315 if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
316 panic("%s: missing IOC register mappings for node %d\n",
317 __FUNCTION__, iommu->nid);
318
319 iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
320 iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
321
322 segments = size >> IO_SEGMENT_SHIFT;
323 pages_per_segment = 1ull << IO_PAGENO_BITS;
324
325 pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
326 __FUNCTION__, iommu->nid, segments, pages_per_segment);
327
328 /* set up the segment table */
329 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
330 BUG_ON(!page);
331 iommu->stab = page_address(page);
332 clear_page(iommu->stab);
333
334 /* ... and the page tables. Since these are contiguous, we can treat
335 * the page tables as one array of ptes, like pSeries does.
336 */
337 ptab_size = segments * pages_per_segment * sizeof(unsigned long);
338 pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
339 iommu->nid, ptab_size, get_order(ptab_size));
340 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
341 BUG_ON(!page);
342
343 iommu->ptab = page_address(page);
344 memset(iommu->ptab, 0, ptab_size);
345
346 /* allocate a bogus page for the end of each mapping */
347 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
348 BUG_ON(!page);
349 iommu->pad_page = page_address(page);
350 clear_page(iommu->pad_page);
351
352 /* number of pages needed for a page table */
353 n_pte_pages = (pages_per_segment *
354 sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
355
356 pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
357 __FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
358 n_pte_pages);
359
360 /* initialise the STEs */
361 reg = IOSTE_V | ((n_pte_pages - 1) << 5);
362
363 if (IOMMU_PAGE_SIZE == 0x1000)
364 reg |= IOSTE_PS_4K;
365 else if (IOMMU_PAGE_SIZE == 0x10000)
366 reg |= IOSTE_PS_64K;
367 else {
368 extern void __unknown_page_size_error(void);
369 __unknown_page_size_error();
370 }
371
372 pr_debug("Setting up IOMMU stab:\n");
373 for (i = 0; i * (1ul << IO_SEGMENT_SHIFT) < size; i++) {
374 iommu->stab[i] = reg |
375 (__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
376 pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
377 }
257 378
258static void iommu_dev_setup_null(struct pci_dev *d) { } 379 /* ensure that the STEs have updated */
259static void iommu_bus_setup_null(struct pci_bus *b) { } 380 mb();
260 381
261struct cell_iommu { 382 /* setup interrupts for the iommu. */
262 unsigned long base; 383 reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
263 unsigned long mmio_base; 384 out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
264 void __iomem *mapped_base; 385 reg & ~IOC_IO_ExcpStat_V);
265 void __iomem *mapped_mmio_base; 386 out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
266}; 387 IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);
267 388
268static struct cell_iommu cell_iommus[NR_CPUS]; 389 virq = irq_create_mapping(NULL,
390 IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
391 BUG_ON(virq == NO_IRQ);
269 392
270/* initialize the iommu to support a simple linear mapping 393 ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
271 * for each DMA window used by any device. For now, we 394 iommu->name, iommu);
272 * happen to know that there is only one DMA window in use, 395 BUG_ON(ret);
273 * starting at iopt_phys_offset. */
274static void cell_do_map_iommu(struct cell_iommu *iommu,
275 unsigned int ioid,
276 unsigned long map_start,
277 unsigned long map_size)
278{
279 unsigned long io_address, real_address;
280 void __iomem *ioc_base, *ioc_mmio_base;
281 ioste ioste;
282 unsigned long index;
283 396
284 /* we pretend the io page table was at a very high address */ 397 /* set the IOC segment table origin register (and turn on the iommu) */
285 const unsigned long fake_iopt = 0x10000000000ul; 398 reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
286 const unsigned long io_page_size = 0x1000000; /* use 16M pages */ 399 out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
287 const unsigned long io_segment_size = 0x10000000; /* 256M */ 400 in_be64(iommu->xlate_regs + IOC_IOST_Origin);
288 401
289 ioc_base = iommu->mapped_base; 402 /* turn on IO translation */
290 ioc_mmio_base = iommu->mapped_mmio_base; 403 reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
291 404 out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
292 for (real_address = 0, io_address = map_start;
293 io_address <= map_start + map_size;
294 real_address += io_page_size, io_address += io_page_size) {
295 ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
296 if ((real_address % io_segment_size) == 0) /* segment start */
297 set_iost_cache(ioc_mmio_base,
298 io_address >> 28, ioste);
299 index = get_ioc_hash_1way(ioste, io_address);
300 pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
301 io_address, index, ioste.val);
302 set_iopt_cache(ioc_mmio_base,
303 get_ioc_hash_1way(ioste, io_address),
304 get_ioc_tag(ioste, io_address),
305 get_iopt_entry(real_address, ioid, IOPT_PROT_RW));
306 }
307} 405}
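
The page-size selection above ends with a link-time assertion: __unknown_page_size_error() is declared but never defined, so if the compiler cannot prove the final else branch dead (i.e. IOMMU_PAGE_SIZE is neither 4K nor 64K), the surviving call breaks the kernel link. A generic sketch of the idiom, with hypothetical names and encodings:

/* never defined anywhere: a call that survives optimization fails the link */
extern void __unsupported_page_size(void);

#define PS_4K	0x1	/* placeholder encodings */
#define PS_64K	0x3

static inline unsigned long encode_page_size(unsigned long size)
{
	/* 'size' must be a compile-time constant so that dead-code
	 * elimination removes the failing branch in the supported cases */
	if (size == 0x1000)
		return PS_4K;
	else if (size == 0x10000)
		return PS_64K;
	__unsupported_page_size();	/* reached only for unsupported sizes */
	return 0;
}
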
308 406
309static void iommu_devnode_setup(struct device_node *d) 407#if 0/* Unused for now */
408static struct iommu_window *find_window(struct cbe_iommu *iommu,
409 unsigned long offset, unsigned long size)
310{ 410{
311 const unsigned int *ioid; 411 struct iommu_window *window;
312 unsigned long map_start, map_size, token;
313 const unsigned long *dma_window;
314 struct cell_iommu *iommu;
315 412
316 ioid = get_property(d, "ioid", NULL); 413 /* todo: check for overlapping (but not equal) windows) */
317 if (!ioid)
318 pr_debug("No ioid entry found !\n");
319 414
320 dma_window = get_property(d, "ibm,dma-window", NULL); 415 list_for_each_entry(window, &(iommu->windows), list) {
321 if (!dma_window) 416 if (window->offset == offset && window->size == size)
322 pr_debug("No ibm,dma-window entry found !\n"); 417 return window;
418 }
323 419
324 map_start = dma_window[1]; 420 return NULL;
325 map_size = dma_window[2]; 421}
326 token = dma_window[0] >> 32; 422#endif
327 423
328 iommu = &cell_iommus[token]; 424static struct iommu_window * __init
425cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
426 unsigned long offset, unsigned long size,
427 unsigned long pte_offset)
428{
429 struct iommu_window *window;
430 const unsigned int *ioid;
329 431
330 cell_do_map_iommu(iommu, *ioid, map_start, map_size); 432 ioid = get_property(np, "ioid", NULL);
433 if (ioid == NULL)
 434		printk(KERN_WARNING "iommu: missing ioid for %s, using 0\n",
435 np->full_name);
436
437 window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
438 BUG_ON(window == NULL);
439
440 window->offset = offset;
441 window->size = size;
442 window->ioid = ioid ? *ioid : 0;
443 window->iommu = iommu;
444 window->pte_offset = pte_offset;
445
446 window->table.it_blocksize = 16;
447 window->table.it_base = (unsigned long)iommu->ptab;
448 window->table.it_index = iommu->nid;
449 window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
450 window->pte_offset;
451 window->table.it_size = size >> IOMMU_PAGE_SHIFT;
452
453 iommu_init_table(&window->table, iommu->nid);
454
455 pr_debug("\tioid %d\n", window->ioid);
456 pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
457 pr_debug("\tbase 0x%016lx\n", window->table.it_base);
458 pr_debug("\toffset 0x%lx\n", window->table.it_offset);
459 pr_debug("\tsize %ld\n", window->table.it_size);
460
461 list_add(&window->list, &iommu->windows);
462
463 if (offset != 0)
464 return window;
465
466 /* We need to map and reserve the first IOMMU page since it's used
467 * by the spider workaround. In theory, we only need to do that when
468 * running on spider but it doesn't really matter.
469 *
470 * This code also assumes that we have a window that starts at 0,
471 * which is the case on all spider based blades.
472 */
473 __set_bit(0, window->table.it_map);
474 tce_build_cell(&window->table, window->table.it_offset, 1,
475 (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
476 window->table.it_hint = window->table.it_blocksize;
477
478 return window;
331} 479}
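
The pte_offset plumbing here is subtle: it is added into it_offset so that the generic allocator hands out bus addresses pre-shifted by the spider DMA offset, while tce_build_cell subtracts it again to index the page table from zero. A worked example with assumed numbers (4K IOMMU pages, a hypothetical 2GB spider offset, window starting at bus address 0):

#include <stdio.h>

#define IOMMU_PAGE_SHIFT  12		/* assumption: 4K IOMMU pages */
#define SPIDER_DMA_OFFSET 0x80000000ul	/* assumed value */

int main(void)
{
	unsigned long offset = 0;	/* window starts at bus address 0 */
	unsigned long pte_offset = SPIDER_DMA_OFFSET >> IOMMU_PAGE_SHIFT;
	unsigned long it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;

	/* the allocator hands out 'index' values starting at it_offset;
	 * tce_build_cell then indexes the ptab at index - pte_offset */
	unsigned long index = it_offset + 5;	/* 6th page of the window */

	printf("ptab slot %lu, bus address 0x%lx\n",
	       index - pte_offset, index << IOMMU_PAGE_SHIFT);
	return 0;
}

With these numbers the device is handed bus address 0x80005000 while the sixth page-table slot is written, i.e. the 2GB offset never appears in the table itself.
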
332 480
333static void iommu_bus_setup(struct pci_bus *b) 481static struct cbe_iommu *cell_iommu_for_node(int nid)
334{ 482{
335 struct device_node *d = (struct device_node *)b->sysdata; 483 int i;
336 iommu_devnode_setup(d);
337}
338 484
485 for (i = 0; i < cbe_nr_iommus; i++)
486 if (iommus[i].nid == nid)
487 return &iommus[i];
488 return NULL;
489}
339 490
340static int cell_map_iommu_hardcoded(int num_nodes) 491static void cell_dma_dev_setup(struct device *dev)
341{ 492{
342 struct cell_iommu *iommu = NULL; 493 struct iommu_window *window;
343 494 struct cbe_iommu *iommu;
344 pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__); 495 struct dev_archdata *archdata = &dev->archdata;
496
497 /* If we run without iommu, no need to do anything */
498 if (pci_dma_ops == &dma_direct_ops)
499 return;
500
501 /* Current implementation uses the first window available in that
 502	 * node's iommu. We -might- do something smarter later, though it may
503 * never be necessary
504 */
505 iommu = cell_iommu_for_node(archdata->numa_node);
506 if (iommu == NULL || list_empty(&iommu->windows)) {
507 printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
508 archdata->of_node ? archdata->of_node->full_name : "?",
509 archdata->numa_node);
510 return;
511 }
512 window = list_entry(iommu->windows.next, struct iommu_window, list);
345 513
346 /* node 0 */ 514 archdata->dma_data = &window->table;
347 iommu = &cell_iommus[0]; 515}
348 iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
349 iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);
350 516
351 enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base); 517static void cell_pci_dma_dev_setup(struct pci_dev *dev)
518{
519 cell_dma_dev_setup(&dev->dev);
520}
352 521
353 cell_do_map_iommu(iommu, 0x048a, 522static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
354 0x20000000ul,0x20000000ul); 523 void *data)
524{
525 struct device *dev = data;
355 526
 356	if (num_nodes < 2)	 527	/* We are only interested in device addition */
528 if (action != BUS_NOTIFY_ADD_DEVICE)
357 return 0; 529 return 0;
358 530
359 /* node 1 */ 531 /* We use the PCI DMA ops */
360 iommu = &cell_iommus[1]; 532 dev->archdata.dma_ops = pci_dma_ops;
361 iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
362 iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);
363
364 enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
365 533
366 cell_do_map_iommu(iommu, 0x048a, 534 cell_dma_dev_setup(dev);
367 0x20000000,0x20000000ul);
368 535
369 return 0; 536 return 0;
370} 537}
371 538
539static struct notifier_block cell_of_bus_notifier = {
540 .notifier_call = cell_of_bus_notify
541};
372 542
373static int cell_map_iommu(void) 543static int __init cell_iommu_get_window(struct device_node *np,
544 unsigned long *base,
545 unsigned long *size)
374{ 546{
375 unsigned int num_nodes = 0; 547 const void *dma_window;
376 const unsigned int *node_id; 548 unsigned long index;
377 const unsigned long *base, *mmio_base;
378 struct device_node *dn;
379 struct cell_iommu *iommu = NULL;
380
381 /* determine number of nodes (=iommus) */
382 pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
383 for(dn = of_find_node_by_type(NULL, "cpu");
384 dn;
385 dn = of_find_node_by_type(dn, "cpu")) {
386 node_id = get_property(dn, "node-id", NULL);
387
388 if (num_nodes < *node_id)
389 num_nodes = *node_id;
390 }
391
392 num_nodes++;
393 pr_debug("%i found.\n", num_nodes);
394 549
 395	/* map the iommu registers for each node */	 550	/* Use ibm,dma-window if available, else hard-code! */
396 pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__); 551 dma_window = get_property(np, "ibm,dma-window", NULL);
397 for(dn = of_find_node_by_type(NULL, "cpu"); 552 if (dma_window == NULL) {
398 dn; 553 *base = 0;
399 dn = of_find_node_by_type(dn, "cpu")) { 554 *size = 0x80000000u;
555 return -ENODEV;
556 }
400 557
401 node_id = get_property(dn, "node-id", NULL); 558 of_parse_dma_window(np, dma_window, &index, base, size);
402 base = get_property(dn, "ioc-cache", NULL); 559 return 0;
403 mmio_base = get_property(dn, "ioc-translation", NULL); 560}
404 561
405 if (!base || !mmio_base || !node_id) 562static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset)
406 return cell_map_iommu_hardcoded(num_nodes); 563{
564 struct cbe_iommu *iommu;
565 unsigned long base, size;
566 int nid, i;
567
568 /* Get node ID */
569 nid = of_node_to_nid(np);
570 if (nid < 0) {
571 printk(KERN_ERR "iommu: failed to get node for %s\n",
572 np->full_name);
573 return;
574 }
575 pr_debug("iommu: setting up iommu for node %d (%s)\n",
576 nid, np->full_name);
577
578 /* XXX todo: If we can have multiple windows on the same IOMMU, which
 579	 * isn't the case today, we probably want to check here whether the
 580	 * iommu for that node is already set up.
 581	 * However, there might be an issue with getting the size right, so let's
582 * ignore that for now. We might want to completely get rid of the
583 * multiple window support since the cell iommu supports per-page ioids
584 */
585
586 if (cbe_nr_iommus >= NR_IOMMUS) {
 587		printk(KERN_ERR "iommu: too many IOMMUs detected! (%s)\n",
588 np->full_name);
589 return;
590 }
407 591
408 iommu = &cell_iommus[*node_id]; 592 /* Init base fields */
409 iommu->base = *base; 593 i = cbe_nr_iommus++;
410 iommu->mmio_base = *mmio_base; 594 iommu = &iommus[i];
595 iommu->stab = 0;
596 iommu->nid = nid;
597 snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
598 INIT_LIST_HEAD(&iommu->windows);
411 599
412 iommu->mapped_base = ioremap(*base, 0x1000); 600 /* Obtain a window for it */
413 iommu->mapped_mmio_base = ioremap(*mmio_base, 0x1000); 601 cell_iommu_get_window(np, &base, &size);
414 602
415 enable_mapping(iommu->mapped_base, 603 pr_debug("\ttranslating window 0x%lx...0x%lx\n",
416 iommu->mapped_mmio_base); 604 base, base + size - 1);
417 605
418 /* everything else will be done in iommu_bus_setup */ 606 /* Initialize the hardware */
419 } 607 cell_iommu_setup_hardware(iommu, size);
420 608
421 return 1; 609 /* Setup the iommu_table */
610 cell_iommu_setup_window(iommu, np, base, size,
611 offset >> IOMMU_PAGE_SHIFT);
422} 612}
423 613
424static void *cell_alloc_coherent(struct device *hwdev, size_t size, 614static void __init cell_disable_iommus(void)
425 dma_addr_t *dma_handle, gfp_t flag)
426{ 615{
427 void *ret; 616 int node;
428 617 unsigned long base, val;
429 ret = (void *)__get_free_pages(flag, get_order(size)); 618 void __iomem *xregs, *cregs;
430 if (ret != NULL) { 619
431 memset(ret, 0, size); 620 /* Make sure IOC translation is disabled on all nodes */
432 *dma_handle = virt_to_abs(ret) | CELL_DMA_VALID; 621 for_each_online_node(node) {
622 if (cell_iommu_find_ioc(node, &base))
623 continue;
624 xregs = ioremap(base, IOC_Reg_Size);
625 if (xregs == NULL)
626 continue;
627 cregs = xregs + IOC_IOCmd_Offset;
628
629 pr_debug("iommu: cleaning up iommu on node %d\n", node);
630
631 out_be64(xregs + IOC_IOST_Origin, 0);
632 (void)in_be64(xregs + IOC_IOST_Origin);
633 val = in_be64(cregs + IOC_IOCmd_Cfg);
634 val &= ~IOC_IOCmd_Cfg_TE;
635 out_be64(cregs + IOC_IOCmd_Cfg, val);
636 (void)in_be64(cregs + IOC_IOCmd_Cfg);
637
638 iounmap(xregs);
433 } 639 }
434 return ret;
435} 640}
436 641
437static void cell_free_coherent(struct device *hwdev, size_t size, 642static int __init cell_iommu_init_disabled(void)
438 void *vaddr, dma_addr_t dma_handle)
439{ 643{
440 free_pages((unsigned long)vaddr, get_order(size)); 644 struct device_node *np = NULL;
441} 645 unsigned long base = 0, size;
646
647 /* When no iommu is present, we use direct DMA ops */
648 pci_dma_ops = &dma_direct_ops;
649
650 /* First make sure all IOC translation is turned off */
651 cell_disable_iommus();
652
653 /* If we have no Axon, we set up the spider DMA magic offset */
654 if (of_find_node_by_name(NULL, "axon") == NULL)
655 dma_direct_offset = SPIDER_DMA_OFFSET;
656
 657	/* Now we need to check where the memory is mapped
 658	 * in PCI space. We assume that all buses use the same dma
 659	 * window, which is always the case so far on Cell, thus we
660 * pick up the first pci-internal node we can find and check
661 * the DMA window from there.
662 */
663 for_each_node_by_name(np, "axon") {
664 if (np->parent == NULL || np->parent->parent != NULL)
665 continue;
666 if (cell_iommu_get_window(np, &base, &size) == 0)
667 break;
668 }
669 if (np == NULL) {
670 for_each_node_by_name(np, "pci-internal") {
671 if (np->parent == NULL || np->parent->parent != NULL)
672 continue;
673 if (cell_iommu_get_window(np, &base, &size) == 0)
674 break;
675 }
676 }
677 of_node_put(np);
678
679 /* If we found a DMA window, we check if it's big enough to enclose
680 * all of physical memory. If not, we force enable IOMMU
681 */
682 if (np && size < lmb_end_of_DRAM()) {
683 printk(KERN_WARNING "iommu: force-enabled, dma window"
684 " (%ldMB) smaller than total memory (%ldMB)\n",
685 size >> 20, lmb_end_of_DRAM() >> 20);
686 return -ENODEV;
687 }
442 688
443static dma_addr_t cell_map_single(struct device *hwdev, void *ptr, 689 dma_direct_offset += base;
444 size_t size, enum dma_data_direction direction)
445{
446 return virt_to_abs(ptr) | CELL_DMA_VALID;
447}
448 690
449static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr, 691 printk("iommu: disabled, direct DMA offset is 0x%lx\n",
450 size_t size, enum dma_data_direction direction) 692 dma_direct_offset);
451{ 693
694 return 0;
452} 695}
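
The final size check is simply "does the fixed DMA window cover all of RAM": if it does not, direct DMA cannot reach every page and the function bails out so the caller falls back to enabling the IOMMU. A sketch of the decision with assumed figures:

#include <stdio.h>

int main(void)
{
	/* assumed figures: a 2GB DMA window but 4GB of installed RAM */
	unsigned long long window_size = 0x80000000ull;
	unsigned long long ram_top = 0x100000000ull;

	if (window_size < ram_top)
		printf("force-enable iommu: window %lluMB < memory %lluMB\n",
		       window_size >> 20, ram_top >> 20);
	else
		printf("direct DMA is sufficient\n");
	return 0;
}
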
453 696
454static int cell_map_sg(struct device *hwdev, struct scatterlist *sg, 697static int __init cell_iommu_init(void)
455 int nents, enum dma_data_direction direction)
456{ 698{
457 int i; 699 struct device_node *np;
700
701 if (!machine_is(cell))
702 return -ENODEV;
703
704 /* If IOMMU is disabled or we have little enough RAM to not need
 705	 * to enable it, we set up a direct mapping.
 706	 *
 707	 * Note: should we make sure we have the IOMMU actually disabled?
708 */
709 if (iommu_is_off ||
710 (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
711 if (cell_iommu_init_disabled() == 0)
712 goto bail;
713
714 /* Setup various ppc_md. callbacks */
715 ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
716 ppc_md.tce_build = tce_build_cell;
717 ppc_md.tce_free = tce_free_cell;
718
719 /* Create an iommu for each /axon node. */
720 for_each_node_by_name(np, "axon") {
721 if (np->parent == NULL || np->parent->parent != NULL)
722 continue;
723 cell_iommu_init_one(np, 0);
724 }
458 725
459 for (i = 0; i < nents; i++, sg++) { 726 /* Create an iommu for each toplevel /pci-internal node for
460 sg->dma_address = (page_to_phys(sg->page) + sg->offset) 727 * old hardware/firmware
461 | CELL_DMA_VALID; 728 */
462 sg->dma_length = sg->length; 729 for_each_node_by_name(np, "pci-internal") {
730 if (np->parent == NULL || np->parent->parent != NULL)
731 continue;
732 cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
463 } 733 }
464 734
465 return nents; 735 /* Setup default PCI iommu ops */
466} 736 pci_dma_ops = &dma_iommu_ops;
467 737
468static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg, 738 bail:
469 int nents, enum dma_data_direction direction) 739 /* Register callbacks on OF platform device addition/removal
470{ 740 * to handle linking them to the right DMA operations
471} 741 */
742 bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);
472 743
473static int cell_dma_supported(struct device *dev, u64 mask) 744 return 0;
474{
475 return mask < 0x100000000ull;
476} 745}
746arch_initcall(cell_iommu_init);
477 747
478static struct dma_mapping_ops cell_iommu_ops = {
479 .alloc_coherent = cell_alloc_coherent,
480 .free_coherent = cell_free_coherent,
481 .map_single = cell_map_single,
482 .unmap_single = cell_unmap_single,
483 .map_sg = cell_map_sg,
484 .unmap_sg = cell_unmap_sg,
485 .dma_supported = cell_dma_supported,
486};
487
488void cell_init_iommu(void)
489{
490 int setup_bus = 0;
491
492 if (of_find_node_by_path("/mambo")) {
493 pr_info("Not using iommu on systemsim\n");
494 } else {
495
496 if (!(of_chosen &&
497 get_property(of_chosen, "linux,iommu-off", NULL)))
498 setup_bus = cell_map_iommu();
499
500 if (setup_bus) {
501 pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
502 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
503 ppc_md.iommu_bus_setup = iommu_bus_setup;
504 } else {
505 pr_debug("%s: IOMMU mapping activated, "
506 "no device action necessary\n", __FUNCTION__);
507 /* Direct I/O, IOMMU off */
508 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
509 ppc_md.iommu_bus_setup = iommu_bus_setup_null;
510 }
511 }
512
513 pci_dma_ops = cell_iommu_ops;
514}
diff --git a/arch/powerpc/platforms/cell/iommu.h b/arch/powerpc/platforms/cell/iommu.h
deleted file mode 100644
index 490d77abfe85..000000000000
--- a/arch/powerpc/platforms/cell/iommu.h
+++ /dev/null
@@ -1,65 +0,0 @@
1#ifndef CELL_IOMMU_H
2#define CELL_IOMMU_H
3
4/* some constants */
5enum {
6 /* segment table entries */
7 IOST_VALID_MASK = 0x8000000000000000ul,
8 IOST_TAG_MASK = 0x3000000000000000ul,
9 IOST_PT_BASE_MASK = 0x000003fffffff000ul,
10 IOST_NNPT_MASK = 0x0000000000000fe0ul,
11 IOST_PS_MASK = 0x000000000000000ful,
12
13 IOST_PS_4K = 0x1,
14 IOST_PS_64K = 0x3,
15 IOST_PS_1M = 0x5,
16 IOST_PS_16M = 0x7,
17
18 /* iopt tag register */
19 IOPT_VALID_MASK = 0x0000000200000000ul,
20 IOPT_TAG_MASK = 0x00000001fffffffful,
21
22 /* iopt cache register */
23 IOPT_PROT_MASK = 0xc000000000000000ul,
24 IOPT_PROT_NONE = 0x0000000000000000ul,
25 IOPT_PROT_READ = 0x4000000000000000ul,
26 IOPT_PROT_WRITE = 0x8000000000000000ul,
27 IOPT_PROT_RW = 0xc000000000000000ul,
28 IOPT_COHERENT = 0x2000000000000000ul,
29
30 IOPT_ORDER_MASK = 0x1800000000000000ul,
31 /* order access to same IOID/VC on same address */
32 IOPT_ORDER_ADDR = 0x0800000000000000ul,
33 /* similar, but only after a write access */
34 IOPT_ORDER_WRITES = 0x1000000000000000ul,
35 /* Order all accesses to same IOID/VC */
36 IOPT_ORDER_VC = 0x1800000000000000ul,
37
38 IOPT_RPN_MASK = 0x000003fffffff000ul,
39 IOPT_HINT_MASK = 0x0000000000000800ul,
40 IOPT_IOID_MASK = 0x00000000000007fful,
41
42 IOSTO_ENABLE = 0x8000000000000000ul,
43 IOSTO_ORIGIN = 0x000003fffffff000ul,
44 IOSTO_HW = 0x0000000000000800ul,
45 IOSTO_SW = 0x0000000000000400ul,
46
47 IOCMD_CONF_TE = 0x0000800000000000ul,
48
49 /* memory mapped registers */
50 IOC_PT_CACHE_DIR = 0x000,
51 IOC_ST_CACHE_DIR = 0x800,
52 IOC_PT_CACHE_REG = 0x910,
53 IOC_ST_ORIGIN = 0x918,
54 IOC_CONF = 0x930,
55
56 /* The high bit needs to be set on every DMA address,
57 only 2GB are addressable */
58 CELL_DMA_VALID = 0x80000000,
59 CELL_DMA_MASK = 0x7fffffff,
60};
61
62
63void cell_init_iommu(void);
64
65#endif
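
The CELL_DMA_VALID scheme the comment above describes is simple enough to show directly: every bus address was the physical address with bit 31 forced on, which is why only the low 2GB of RAM were reachable. A minimal sketch:

#include <stdio.h>
#include <stdint.h>

#define CELL_DMA_VALID 0x80000000u
#define CELL_DMA_MASK  0x7fffffffu

int main(void)
{
	uint32_t phys = 0x12345000;		/* must lie below 2GB */
	uint32_t bus  = phys | CELL_DMA_VALID;	/* what the device sees */

	printf("bus 0x%08x -> phys 0x%08x\n", bus, bus & CELL_DMA_MASK);
	return 0;
}
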
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 9f2e4ed20a57..8c20f0fb8651 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -38,32 +38,25 @@
38#include "pervasive.h" 38#include "pervasive.h"
39#include "cbe_regs.h" 39#include "cbe_regs.h"
40 40
41static DEFINE_SPINLOCK(cbe_pervasive_lock); 41static void cbe_power_save(void)
42
43static void __init cbe_enable_pause_zero(void)
44{ 42{
45 unsigned long thread_switch_control; 43 unsigned long ctrl, thread_switch_control;
46 unsigned long temp_register;
47 struct cbe_pmd_regs __iomem *pregs;
48
49 spin_lock_irq(&cbe_pervasive_lock);
50 pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
51 if (pregs == NULL)
52 goto out;
53 44
54 pr_debug("Power Management: CPU %d\n", smp_processor_id()); 45 /*
55 46 * We need to hard disable interrupts, but we also need to mark them
56 /* Enable Pause(0) control bit */ 47 * hard disabled in the PACA so that the local_irq_enable() done by
 57	temp_register = in_be64(&pregs->pm_control);	 48	 * our caller upon return properly hard-enables.
49 */
50 hard_irq_disable();
51 get_paca()->hard_enabled = 0;
58 52
59 out_be64(&pregs->pm_control, 53 ctrl = mfspr(SPRN_CTRLF);
60 temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
61 54
62 /* Enable DEC and EE interrupt request */ 55 /* Enable DEC and EE interrupt request */
63 thread_switch_control = mfspr(SPRN_TSC_CELL); 56 thread_switch_control = mfspr(SPRN_TSC_CELL);
64 thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; 57 thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
65 58
66 switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) { 59 switch (ctrl & CTRL_CT) {
67 case CTRL_CT0: 60 case CTRL_CT0:
68 thread_switch_control |= TSC_CELL_DEC_ENABLE_0; 61 thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
69 break; 62 break;
@@ -75,58 +68,21 @@ static void __init cbe_enable_pause_zero(void)
75 __FUNCTION__); 68 __FUNCTION__);
76 break; 69 break;
77 } 70 }
78
79 mtspr(SPRN_TSC_CELL, thread_switch_control); 71 mtspr(SPRN_TSC_CELL, thread_switch_control);
80 72
81out: 73 /*
 82	spin_unlock_irq(&cbe_pervasive_lock);	 74	 * go into low thread priority; medium priority will be
83} 75 * restored for us after wake-up.
84 76 */
85static void cbe_idle(void) 77 HMT_low();
86{
87 unsigned long ctrl;
88 78
89 /* Why do we do that on every idle ? Couldn't that be done once for 79 /*
90 * all or do we lose the state some way ? Also, the pm_control 80 * atomically disable thread execution and runlatch.
91 * register setting, that can't be set once at boot ? We really want 81 * External and Decrementer exceptions are still handled when the
92 * to move that away in order to implement a simple powersave 82 * thread is disabled but now enter in cbe_system_reset_exception()
93 */ 83 */
94 cbe_enable_pause_zero(); 84 ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
95 85 mtspr(SPRN_CTRLT, ctrl);
96 while (1) {
97 if (!need_resched()) {
98 local_irq_disable();
99 while (!need_resched()) {
100 /* go into low thread priority */
101 HMT_low();
102
103 /*
104 * atomically disable thread execution
105 * and runlatch.
106 * External and Decrementer exceptions
107 * are still handled when the thread
108 * is disabled but now enter in
109 * cbe_system_reset_exception()
110 */
111 ctrl = mfspr(SPRN_CTRLF);
112 ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
113 mtspr(SPRN_CTRLT, ctrl);
114 }
115 /* restore thread prio */
116 HMT_medium();
117 local_irq_enable();
118 }
119
120 /*
121 * turn runlatch on again before scheduling the
122 * process we just woke up
123 */
124 ppc64_runlatch_on();
125
126 preempt_enable_no_resched();
127 schedule();
128 preempt_disable();
129 }
130} 86}
131 87
132static int cbe_system_reset_exception(struct pt_regs *regs) 88static int cbe_system_reset_exception(struct pt_regs *regs)
@@ -158,9 +114,20 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
158 114
159void __init cbe_pervasive_init(void) 115void __init cbe_pervasive_init(void)
160{ 116{
117 int cpu;
161 if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) 118 if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
162 return; 119 return;
163 120
164 ppc_md.idle_loop = cbe_idle; 121 for_each_possible_cpu(cpu) {
122 struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
123 if (!regs)
124 continue;
125
126 /* Enable Pause(0) control bit */
127 out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
128 CBE_PMD_PAUSE_ZERO_CONTROL);
129 }
130
131 ppc_md.power_save = cbe_power_save;
165 ppc_md.system_reset_exception = cbe_system_reset_exception; 132 ppc_md.system_reset_exception = cbe_system_reset_exception;
166} 133}
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
new file mode 100644
index 000000000000..99c612025e8f
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -0,0 +1,429 @@
1/*
2 * Cell Broadband Engine Performance Monitor
3 *
4 * (C) Copyright IBM Corporation 2001,2006
5 *
6 * Author:
7 * David Erb (djerb@us.ibm.com)
8 * Kevin Corry (kevcorry@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/interrupt.h>
26#include <linux/types.h>
27#include <asm/io.h>
28#include <asm/irq_regs.h>
29#include <asm/machdep.h>
30#include <asm/pmc.h>
31#include <asm/reg.h>
32#include <asm/spu.h>
33
34#include "cbe_regs.h"
35#include "interrupt.h"
36
37/*
38 * When writing to write-only mmio addresses, save a shadow copy. All of the
 39 * registers are 32-bit, but stored in the upper half of a 64-bit field in
40 * pmd_regs.
41 */
42
43#define WRITE_WO_MMIO(reg, x) \
44 do { \
45 u32 _x = (x); \
46 struct cbe_pmd_regs __iomem *pmd_regs; \
47 struct cbe_pmd_shadow_regs *shadow_regs; \
48 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
49 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
50 out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \
51 shadow_regs->reg = _x; \
52 } while (0)
53
54#define READ_SHADOW_REG(val, reg) \
55 do { \
56 struct cbe_pmd_shadow_regs *shadow_regs; \
57 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
58 (val) = shadow_regs->reg; \
59 } while (0)
60
61#define READ_MMIO_UPPER32(val, reg) \
62 do { \
63 struct cbe_pmd_regs __iomem *pmd_regs; \
64 pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
65 (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \
66 } while (0)
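
These three macros implement a common driver pattern for write-only MMIO: remember every value in a software shadow on write, and answer reads from the shadow since the hardware cannot be read back. A minimal sketch of the pattern with hypothetical names (the raw pointer store stands in for out_be64()):

#include <stdint.h>

struct shadowed_reg {
	volatile uint64_t *mmio;	/* write-only hardware register */
	uint32_t shadow;		/* last value written */
};

static void reg_write(struct shadowed_reg *r, uint32_t val)
{
	*r->mmio = (uint64_t)val << 32;	/* value lives in the upper half */
	r->shadow = val;		/* the hardware won't echo it back */
}

static uint32_t reg_read(struct shadowed_reg *r)
{
	return r->shadow;		/* read what we last wrote */
}
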
67
68/*
69 * Physical counter registers.
70 * Each physical counter can act as one 32-bit counter or two 16-bit counters.
71 */
72
73u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
74{
75 u32 val_in_latch, val = 0;
76
77 if (phys_ctr < NR_PHYS_CTRS) {
78 READ_SHADOW_REG(val_in_latch, counter_value_in_latch);
79
80 /* Read the latch or the actual counter, whichever is newer. */
81 if (val_in_latch & (1 << phys_ctr)) {
82 READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
83 } else {
84 READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
85 }
86 }
87
88 return val;
89}
90EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);
91
92void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
93{
94 struct cbe_pmd_shadow_regs *shadow_regs;
95 u32 pm_ctrl;
96
97 if (phys_ctr < NR_PHYS_CTRS) {
98 /* Writing to a counter only writes to a hardware latch.
99 * The new value is not propagated to the actual counter
100 * until the performance monitor is enabled.
101 */
102 WRITE_WO_MMIO(pm_ctr[phys_ctr], val);
103
104 pm_ctrl = cbe_read_pm(cpu, pm_control);
105 if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
106 /* The counters are already active, so we need to
107 * rewrite the pm_control register to "re-enable"
108 * the PMU.
109 */
110 cbe_write_pm(cpu, pm_control, pm_ctrl);
111 } else {
112 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
113 shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
114 }
115 }
116}
117EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
118
119/*
120 * "Logical" counter registers.
121 * These will read/write 16-bits or 32-bits depending on the
122 * current size of the counter. Counters 4 - 7 are always 16-bit.
123 */
124
125u32 cbe_read_ctr(u32 cpu, u32 ctr)
126{
127 u32 val;
128 u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
129
130 val = cbe_read_phys_ctr(cpu, phys_ctr);
131
132 if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
133 val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
134
135 return val;
136}
137EXPORT_SYMBOL_GPL(cbe_read_ctr);
138
139void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
140{
141 u32 phys_ctr;
142 u32 phys_val;
143
144 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
145
146 if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
147 phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
148
149 if (ctr < NR_PHYS_CTRS)
150 val = (val << 16) | (phys_val & 0xffff);
151 else
152 val = (val & 0xffff) | (phys_val & 0xffff0000);
153 }
154
155 cbe_write_phys_ctr(cpu, phys_ctr, val);
156}
157EXPORT_SYMBOL_GPL(cbe_write_ctr);
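
The split-counter arithmetic is easiest to see with concrete bits: in 16-bit mode, logical counter n (n < NR_PHYS_CTRS) occupies the upper half of physical counter n, and logical counter n + NR_PHYS_CTRS the lower half. A small sketch, assuming NR_PHYS_CTRS is 4 (consistent with "Counters 4 - 7 are always 16-bit" above):

#include <stdio.h>
#include <stdint.h>

#define NR_PHYS_CTRS 4	/* assumption: 4 physical, 8 logical counters */

static uint32_t read_logical(uint32_t phys_val, uint32_t ctr)
{
	/* mirrors the 16-bit path of cbe_read_ctr() */
	return (ctr < NR_PHYS_CTRS) ? (phys_val >> 16) : (phys_val & 0xffff);
}

int main(void)
{
	uint32_t phys = 0x00120034;	/* logical ctr 1 = 0x12, ctr 5 = 0x34 */

	printf("ctr1=0x%x ctr5=0x%x\n",
	       read_logical(phys, 1), read_logical(phys, 5));
	return 0;
}
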
158
159/*
160 * Counter-control registers.
161 * Each "logical" counter has a corresponding control register.
162 */
163
164u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
165{
166 u32 pm07_control = 0;
167
168 if (ctr < NR_CTRS)
169 READ_SHADOW_REG(pm07_control, pm07_control[ctr]);
170
171 return pm07_control;
172}
173EXPORT_SYMBOL_GPL(cbe_read_pm07_control);
174
175void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
176{
177 if (ctr < NR_CTRS)
178 WRITE_WO_MMIO(pm07_control[ctr], val);
179}
180EXPORT_SYMBOL_GPL(cbe_write_pm07_control);
181
182/*
183 * Other PMU control registers. Most of these are write-only.
184 */
185
186u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
187{
188 u32 val = 0;
189
190 switch (reg) {
191 case group_control:
192 READ_SHADOW_REG(val, group_control);
193 break;
194
195 case debug_bus_control:
196 READ_SHADOW_REG(val, debug_bus_control);
197 break;
198
199 case trace_address:
200 READ_MMIO_UPPER32(val, trace_address);
201 break;
202
203 case ext_tr_timer:
204 READ_SHADOW_REG(val, ext_tr_timer);
205 break;
206
207 case pm_status:
208 READ_MMIO_UPPER32(val, pm_status);
209 break;
210
211 case pm_control:
212 READ_SHADOW_REG(val, pm_control);
213 break;
214
215 case pm_interval:
216 READ_SHADOW_REG(val, pm_interval);
217 break;
218
219 case pm_start_stop:
220 READ_SHADOW_REG(val, pm_start_stop);
221 break;
222 }
223
224 return val;
225}
226EXPORT_SYMBOL_GPL(cbe_read_pm);
227
228void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
229{
230 switch (reg) {
231 case group_control:
232 WRITE_WO_MMIO(group_control, val);
233 break;
234
235 case debug_bus_control:
236 WRITE_WO_MMIO(debug_bus_control, val);
237 break;
238
239 case trace_address:
240 WRITE_WO_MMIO(trace_address, val);
241 break;
242
243 case ext_tr_timer:
244 WRITE_WO_MMIO(ext_tr_timer, val);
245 break;
246
247 case pm_status:
248 WRITE_WO_MMIO(pm_status, val);
249 break;
250
251 case pm_control:
252 WRITE_WO_MMIO(pm_control, val);
253 break;
254
255 case pm_interval:
256 WRITE_WO_MMIO(pm_interval, val);
257 break;
258
259 case pm_start_stop:
260 WRITE_WO_MMIO(pm_start_stop, val);
261 break;
262 }
263}
264EXPORT_SYMBOL_GPL(cbe_write_pm);
265
266/*
267 * Get/set the size of a physical counter to either 16 or 32 bits.
268 */
269
270u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
271{
272 u32 pm_ctrl, size = 0;
273
274 if (phys_ctr < NR_PHYS_CTRS) {
275 pm_ctrl = cbe_read_pm(cpu, pm_control);
276 size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
277 }
278
279 return size;
280}
281EXPORT_SYMBOL_GPL(cbe_get_ctr_size);
282
283void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
284{
285 u32 pm_ctrl;
286
287 if (phys_ctr < NR_PHYS_CTRS) {
288 pm_ctrl = cbe_read_pm(cpu, pm_control);
289 switch (ctr_size) {
290 case 16:
291 pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
292 break;
293
294 case 32:
295 pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
296 break;
297 }
298 cbe_write_pm(cpu, pm_control, pm_ctrl);
299 }
300}
301EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
302
303/*
304 * Enable/disable the entire performance monitoring unit.
305 * When we enable the PMU, all pending writes to counters get committed.
306 */
307
308void cbe_enable_pm(u32 cpu)
309{
310 struct cbe_pmd_shadow_regs *shadow_regs;
311 u32 pm_ctrl;
312
313 shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
314 shadow_regs->counter_value_in_latch = 0;
315
316 pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
317 cbe_write_pm(cpu, pm_control, pm_ctrl);
318}
319EXPORT_SYMBOL_GPL(cbe_enable_pm);
320
321void cbe_disable_pm(u32 cpu)
322{
323 u32 pm_ctrl;
324 pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
325 cbe_write_pm(cpu, pm_control, pm_ctrl);
326}
327EXPORT_SYMBOL_GPL(cbe_disable_pm);
328
329/*
330 * Reading from the trace_buffer.
331 * The trace buffer is two 64-bit registers. Reading from
332 * the second half automatically increments the trace_address.
333 */
334
335void cbe_read_trace_buffer(u32 cpu, u64 *buf)
336{
337 struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
338
339 *buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
340 *buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
341}
342EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
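
Because a read of the second register advances trace_address in hardware, draining the buffer is just repeated calls with a two-word stride. A hedged usage sketch (the entry count is hypothetical; in practice it would come from the PMU's trace state):

/* drain n_entries 128-bit trace entries for 'cpu' into 'buf';
 * each call reads both halves and the hardware bumps trace_address */
static void drain_trace_buffer(u32 cpu, u64 *buf, int n_entries)
{
	int i;

	for (i = 0; i < n_entries; i++, buf += 2)
		cbe_read_trace_buffer(cpu, buf);
}
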
343
344/*
345 * Enabling/disabling interrupts for the entire performance monitoring unit.
346 */
347
348u32 cbe_query_pm_interrupts(u32 cpu)
349{
350 return cbe_read_pm(cpu, pm_status);
351}
352EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
353
354u32 cbe_clear_pm_interrupts(u32 cpu)
355{
356 /* Reading pm_status clears the interrupt bits. */
357 return cbe_query_pm_interrupts(cpu);
358}
359EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
360
361void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
362{
363 /* Set which node and thread will handle the next interrupt. */
364 iic_set_interrupt_routing(cpu, thread, 0);
365
366 /* Enable the interrupt bits in the pm_status register. */
367 if (mask)
368 cbe_write_pm(cpu, pm_status, mask);
369}
370EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
371
372void cbe_disable_pm_interrupts(u32 cpu)
373{
374 cbe_clear_pm_interrupts(cpu);
375 cbe_write_pm(cpu, pm_status, 0);
376}
377EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
378
379static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
380{
381 perf_irq(get_irq_regs());
382 return IRQ_HANDLED;
383}
384
385int __init cbe_init_pm_irq(void)
386{
387 unsigned int irq;
388 int rc, node;
389
390 for_each_node(node) {
391 irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
392 (node << IIC_IRQ_NODE_SHIFT));
393 if (irq == NO_IRQ) {
394 printk("ERROR: Unable to allocate irq for node %d\n",
395 node);
396 return -EINVAL;
397 }
398
399 rc = request_irq(irq, cbe_pm_irq,
400 IRQF_DISABLED, "cbe-pmu-0", NULL);
401 if (rc) {
402 printk("ERROR: Request for irq on node %d failed\n",
403 node);
404 return rc;
405 }
406 }
407
408 return 0;
409}
410arch_initcall(cbe_init_pm_irq);
411
412void cbe_sync_irq(int node)
413{
414 unsigned int irq;
415
416 irq = irq_find_mapping(NULL,
417 IIC_IRQ_IOEX_PMI
418 | (node << IIC_IRQ_NODE_SHIFT));
419
420 if (irq == NO_IRQ) {
421 printk(KERN_WARNING "ERROR, unable to get existing irq %d " \
422 "for node %d\n", irq, node);
423 return;
424 }
425
426 synchronize_irq(irq);
427}
428EXPORT_SYMBOL_GPL(cbe_sync_irq);
429
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 22c228a49c33..36989c2eee66 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -50,9 +50,10 @@
50#include <asm/spu.h> 50#include <asm/spu.h>
51#include <asm/spu_priv1.h> 51#include <asm/spu_priv1.h>
52#include <asm/udbg.h> 52#include <asm/udbg.h>
53#include <asm/mpic.h>
54#include <asm/of_platform.h>
53 55
54#include "interrupt.h" 56#include "interrupt.h"
55#include "iommu.h"
56#include "cbe_regs.h" 57#include "cbe_regs.h"
57#include "pervasive.h" 58#include "pervasive.h"
58#include "ras.h" 59#include "ras.h"
@@ -80,24 +81,72 @@ static void cell_progress(char *s, unsigned short hex)
80 printk("*** %04x : %s\n", hex, s ? s : ""); 81 printk("*** %04x : %s\n", hex, s ? s : "");
81} 82}
82 83
83static void __init cell_pcibios_fixup(void) 84static int __init cell_publish_devices(void)
84{ 85{
85 struct pci_dev *dev = NULL; 86 if (!machine_is(cell))
87 return 0;
88
89 /* Publish OF platform devices for southbridge IOs */
90 of_platform_bus_probe(NULL, NULL, NULL);
91
92 return 0;
93}
94device_initcall(cell_publish_devices);
95
96static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
97{
98 struct mpic *mpic = desc->handler_data;
99 unsigned int virq;
100
101 virq = mpic_get_one_irq(mpic);
102 if (virq != NO_IRQ)
103 generic_handle_irq(virq);
104 desc->chip->eoi(irq);
105}
86 106
87 for_each_pci_dev(dev) 107static void __init mpic_init_IRQ(void)
88 pci_read_irq_line(dev); 108{
109 struct device_node *dn;
110 struct mpic *mpic;
111 unsigned int virq;
112
113 for (dn = NULL;
114 (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
115 if (!device_is_compatible(dn, "CBEA,platform-open-pic"))
116 continue;
117
118 /* The MPIC driver will get everything it needs from the
119 * device-tree, just pass 0 to all arguments
120 */
121 mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC ");
122 if (mpic == NULL)
123 continue;
124 mpic_init(mpic);
125
126 virq = irq_of_parse_and_map(dn, 0);
127 if (virq == NO_IRQ)
128 continue;
129
130 printk(KERN_INFO "%s : hooking up to IRQ %d\n",
131 dn->full_name, virq);
132 set_irq_data(virq, mpic);
133 set_irq_chained_handler(virq, cell_mpic_cascade);
134 }
89} 135}
90 136
137
91static void __init cell_init_irq(void) 138static void __init cell_init_irq(void)
92{ 139{
93 iic_init_IRQ(); 140 iic_init_IRQ();
94 spider_init_IRQ(); 141 spider_init_IRQ();
142 mpic_init_IRQ();
95} 143}
96 144
97static void __init cell_setup_arch(void) 145static void __init cell_setup_arch(void)
98{ 146{
99#ifdef CONFIG_SPU_BASE 147#ifdef CONFIG_SPU_BASE
100 spu_priv1_ops = &spu_priv1_mmio_ops; 148 spu_priv1_ops = &spu_priv1_mmio_ops;
149 spu_management_ops = &spu_management_of_ops;
101#endif 150#endif
102 151
103 cbe_regs_init(); 152 cbe_regs_init();
@@ -109,7 +158,6 @@ static void __init cell_setup_arch(void)
109#ifdef CONFIG_SMP 158#ifdef CONFIG_SMP
110 smp_init_cell(); 159 smp_init_cell();
111#endif 160#endif
112
113 /* init to some ~sane value until calibrate_delay() runs */ 161 /* init to some ~sane value until calibrate_delay() runs */
114 loops_per_jiffy = 50000000; 162 loops_per_jiffy = 50000000;
115 163
@@ -129,19 +177,6 @@ static void __init cell_setup_arch(void)
129 mmio_nvram_init(); 177 mmio_nvram_init();
130} 178}
131 179
132/*
133 * Early initialization. Relocation is on but do not reference unbolted pages
134 */
135static void __init cell_init_early(void)
136{
137 DBG(" -> cell_init_early()\n");
138
139 cell_init_iommu();
140
141 DBG(" <- cell_init_early()\n");
142}
143
144
145static int __init cell_probe(void) 180static int __init cell_probe(void)
146{ 181{
147 unsigned long root = of_get_flat_dt_root(); 182 unsigned long root = of_get_flat_dt_root();
@@ -168,7 +203,6 @@ define_machine(cell) {
168 .name = "Cell", 203 .name = "Cell",
169 .probe = cell_probe, 204 .probe = cell_probe,
170 .setup_arch = cell_setup_arch, 205 .setup_arch = cell_setup_arch,
171 .init_early = cell_init_early,
172 .show_cpuinfo = cell_show_cpuinfo, 206 .show_cpuinfo = cell_show_cpuinfo,
173 .restart = rtas_restart, 207 .restart = rtas_restart,
174 .power_off = rtas_power_off, 208 .power_off = rtas_power_off,
@@ -180,7 +214,7 @@ define_machine(cell) {
180 .check_legacy_ioport = cell_check_legacy_ioport, 214 .check_legacy_ioport = cell_check_legacy_ioport,
181 .progress = cell_progress, 215 .progress = cell_progress,
182 .init_IRQ = cell_init_irq, 216 .init_IRQ = cell_init_irq,
183 .pcibios_fixup = cell_pcibios_fixup, 217 .pci_setup_phb = rtas_setup_phb,
184#ifdef CONFIG_KEXEC 218#ifdef CONFIG_KEXEC
185 .machine_kexec = default_machine_kexec, 219 .machine_kexec = default_machine_kexec,
186 .machine_kexec_prepare = default_machine_kexec_prepare, 220 .machine_kexec_prepare = default_machine_kexec_prepare,
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7aa809d5a244..bd7bffc3ddd0 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,22 +25,17 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/poll.h>
30#include <linux/ptrace.h> 28#include <linux/ptrace.h>
31#include <linux/slab.h> 29#include <linux/slab.h>
32#include <linux/wait.h> 30#include <linux/wait.h>
33 31#include <linux/mm.h>
34#include <asm/firmware.h> 32#include <linux/io.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <linux/mutex.h> 33#include <linux/mutex.h>
38#include <asm/spu.h> 34#include <asm/spu.h>
39#include <asm/spu_priv1.h> 35#include <asm/spu_priv1.h>
40#include <asm/mmu_context.h> 36#include <asm/xmon.h>
41
42#include "interrupt.h"
43 37
38const struct spu_management_ops *spu_management_ops;
44const struct spu_priv1_ops *spu_priv1_ops; 39const struct spu_priv1_ops *spu_priv1_ops;
45 40
46EXPORT_SYMBOL_GPL(spu_priv1_ops); 41EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -89,7 +84,30 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
89 printk("%s: invalid access during switch!\n", __func__); 84 printk("%s: invalid access during switch!\n", __func__);
90 return 1; 85 return 1;
91 } 86 }
92 if (!mm || (REGION_ID(ea) != USER_REGION_ID)) { 87 esid = (ea & ESID_MASK) | SLB_ESID_V;
88
89 switch(REGION_ID(ea)) {
90 case USER_REGION_ID:
91#ifdef CONFIG_HUGETLB_PAGE
92 if (in_hugepage_area(mm->context, ea))
93 llp = mmu_psize_defs[mmu_huge_psize].sllp;
94 else
95#endif
96 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
97 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
98 SLB_VSID_USER | llp;
99 break;
100 case VMALLOC_REGION_ID:
101 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
102 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
103 SLB_VSID_KERNEL | llp;
104 break;
105 case KERNEL_REGION_ID:
106 llp = mmu_psize_defs[mmu_linear_psize].sllp;
107 vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
108 SLB_VSID_KERNEL | llp;
109 break;
110 default:
93 /* Future: support kernel segments so that drivers 111 /* Future: support kernel segments so that drivers
94 * can use SPUs. 112 * can use SPUs.
95 */ 113 */
@@ -97,16 +115,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
97 return 1; 115 return 1;
98 } 116 }
99 117
100 esid = (ea & ESID_MASK) | SLB_ESID_V;
101#ifdef CONFIG_HUGETLB_PAGE
102 if (in_hugepage_area(mm->context, ea))
103 llp = mmu_psize_defs[mmu_huge_psize].sllp;
104 else
105#endif
106 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
107 vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
108 SLB_VSID_USER | llp;
109
110 out_be64(&priv2->slb_index_W, spu->slb_replace); 118 out_be64(&priv2->slb_index_W, spu->slb_replace);
111 out_be64(&priv2->slb_vsid_RW, vsid); 119 out_be64(&priv2->slb_vsid_RW, vsid);
112 out_be64(&priv2->slb_esid_RW, esid); 120 out_be64(&priv2->slb_esid_RW, esid);
@@ -320,6 +328,7 @@ static void spu_free_irqs(struct spu *spu)
320} 328}
321 329
322static struct list_head spu_list[MAX_NUMNODES]; 330static struct list_head spu_list[MAX_NUMNODES];
331static LIST_HEAD(spu_full_list);
323static DEFINE_MUTEX(spu_mutex); 332static DEFINE_MUTEX(spu_mutex);
324 333
325static void spu_init_channels(struct spu *spu) 334static void spu_init_channels(struct spu *spu)
@@ -364,8 +373,7 @@ struct spu *spu_alloc_node(int node)
364 if (!list_empty(&spu_list[node])) { 373 if (!list_empty(&spu_list[node])) {
365 spu = list_entry(spu_list[node].next, struct spu, list); 374 spu = list_entry(spu_list[node].next, struct spu, list);
366 list_del_init(&spu->list); 375 list_del_init(&spu->list);
367 pr_debug("Got SPU %x %d %d\n", 376 pr_debug("Got SPU %d %d\n", spu->number, spu->node);
368 spu->isrc, spu->number, spu->node);
369 spu_init_channels(spu); 377 spu_init_channels(spu);
370 } 378 }
371 mutex_unlock(&spu_mutex); 379 mutex_unlock(&spu_mutex);
@@ -493,280 +501,65 @@ int spu_irq_class_1_bottom(struct spu *spu)
493 if (!error) { 501 if (!error) {
494 spu_restart_dma(spu); 502 spu_restart_dma(spu);
495 } else { 503 } else {
496 __spu_trap_invalid_dma(spu); 504 spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
497 } 505 }
498 return ret; 506 return ret;
499} 507}
500 508
501static int __init find_spu_node_id(struct device_node *spe) 509struct sysdev_class spu_sysdev_class = {
502{ 510 set_kset_name("spu")
503 const unsigned int *id; 511};
504 struct device_node *cpu;
505 cpu = spe->parent->parent;
506 id = get_property(cpu, "node-id", NULL);
507 return id ? *id : 0;
508}
509
510static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
511 const char *prop)
512{
513 static DEFINE_MUTEX(add_spumem_mutex);
514
515 const struct address_prop {
516 unsigned long address;
517 unsigned int len;
518 } __attribute__((packed)) *p;
519 int proplen;
520
521 unsigned long start_pfn, nr_pages;
522 struct pglist_data *pgdata;
523 struct zone *zone;
524 int ret;
525
526 p = get_property(spe, prop, &proplen);
527 WARN_ON(proplen != sizeof (*p));
528
529 start_pfn = p->address >> PAGE_SHIFT;
530 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
531
532 pgdata = NODE_DATA(spu->nid);
533 zone = pgdata->node_zones;
534
535 /* XXX rethink locking here */
536 mutex_lock(&add_spumem_mutex);
537 ret = __add_pages(zone, start_pfn, nr_pages);
538 mutex_unlock(&add_spumem_mutex);
539
540 return ret;
541}
542 512
543static void __iomem * __init map_spe_prop(struct spu *spu, 513int spu_add_sysdev_attr(struct sysdev_attribute *attr)
544 struct device_node *n, const char *name)
545{ 514{
546 const struct address_prop { 515 struct spu *spu;
547 unsigned long address; 516 mutex_lock(&spu_mutex);
548 unsigned int len;
549 } __attribute__((packed)) *prop;
550
551 const void *p;
552 int proplen;
553 void __iomem *ret = NULL;
554 int err = 0;
555
556 p = get_property(n, name, &proplen);
557 if (proplen != sizeof (struct address_prop))
558 return NULL;
559
560 prop = p;
561
562 err = cell_spuprop_present(spu, n, name);
563 if (err && (err != -EEXIST))
564 goto out;
565
566 ret = ioremap(prop->address, prop->len);
567
568 out:
569 return ret;
570}
571 517
572static void spu_unmap(struct spu *spu) 518 list_for_each_entry(spu, &spu_full_list, full_list)
573{ 519 sysdev_create_file(&spu->sysdev, attr);
574 iounmap(spu->priv2);
575 iounmap(spu->priv1);
576 iounmap(spu->problem);
577 iounmap((__force u8 __iomem *)spu->local_store);
578}
579 520
580/* This function shall be abstracted for HV platforms */ 521 mutex_unlock(&spu_mutex);
581static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np) 522 return 0;
582{
583 unsigned int isrc;
584 const u32 *tmp;
585
586 /* Get the interrupt source unit from the device-tree */
587 tmp = get_property(np, "isrc", NULL);
588 if (!tmp)
589 return -ENODEV;
590 isrc = tmp[0];
591
592 /* Add the node number */
593 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
594 spu->isrc = isrc;
595
596 /* Now map interrupts of all 3 classes */
597 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
598 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
599 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
600
601 /* Right now, we only fail if class 2 failed */
602 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
603} 523}
524EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
604 525
605static int __init spu_map_device_old(struct spu *spu, struct device_node *node) 526int spu_add_sysdev_attr_group(struct attribute_group *attrs)
606{ 527{
607 const char *prop; 528 struct spu *spu;
608 int ret; 529 mutex_lock(&spu_mutex);
609
610 ret = -ENODEV;
611 spu->name = get_property(node, "name", NULL);
612 if (!spu->name)
613 goto out;
614
615 prop = get_property(node, "local-store", NULL);
616 if (!prop)
617 goto out;
618 spu->local_store_phys = *(unsigned long *)prop;
619
620 /* we use local store as ram, not io memory */
621 spu->local_store = (void __force *)
622 map_spe_prop(spu, node, "local-store");
623 if (!spu->local_store)
624 goto out;
625
626 prop = get_property(node, "problem", NULL);
627 if (!prop)
628 goto out_unmap;
629 spu->problem_phys = *(unsigned long *)prop;
630
631 spu->problem= map_spe_prop(spu, node, "problem");
632 if (!spu->problem)
633 goto out_unmap;
634
635 spu->priv1= map_spe_prop(spu, node, "priv1");
636 /* priv1 is not available on a hypervisor */
637
638 spu->priv2= map_spe_prop(spu, node, "priv2");
639 if (!spu->priv2)
640 goto out_unmap;
641 ret = 0;
642 goto out;
643
644out_unmap:
645 spu_unmap(spu);
646out:
647 return ret;
648}
649 530
650static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) 531 list_for_each_entry(spu, &spu_full_list, full_list)
651{ 532 sysfs_create_group(&spu->sysdev.kobj, attrs);
652 struct of_irq oirq;
653 int ret;
654 int i;
655 533
656 for (i=0; i < 3; i++) { 534 mutex_unlock(&spu_mutex);
657 ret = of_irq_map_one(np, i, &oirq);
658 if (ret) {
659 pr_debug("spu_new: failed to get irq %d\n", i);
660 goto err;
661 }
662 ret = -EINVAL;
663 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
664 oirq.controller->full_name);
665 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
666 oirq.specifier, oirq.size);
667 if (spu->irqs[i] == NO_IRQ) {
668 pr_debug("spu_new: failed to map it !\n");
669 goto err;
670 }
671 }
672 return 0; 535 return 0;
673
674err:
675 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
676 for (; i >= 0; i--) {
677 if (spu->irqs[i] != NO_IRQ)
678 irq_dispose_mapping(spu->irqs[i]);
679 }
680 return ret;
681} 536}
537EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
682 538
683static int spu_map_resource(struct device_node *node, int nr,
684 void __iomem** virt, unsigned long *phys)
685{
686 struct resource resource = { };
687 int ret;
688
689 ret = of_address_to_resource(node, nr, &resource);
690 if (ret)
691 goto out;
692 539
693 if (phys) 540void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
694 *phys = resource.start;
695 *virt = ioremap(resource.start, resource.end - resource.start);
696 if (!*virt)
697 ret = -EINVAL;
698
699out:
700 return ret;
701}
702
703static int __init spu_map_device(struct spu *spu, struct device_node *node)
704{ 541{
705 int ret = -ENODEV; 542 struct spu *spu;
706 spu->name = get_property(node, "name", NULL); 543 mutex_lock(&spu_mutex);
707 if (!spu->name)
708 goto out;
709
710 ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
711 &spu->local_store_phys);
712 if (ret) {
713 pr_debug("spu_new: failed to map %s resource 0\n",
714 node->full_name);
715 goto out;
716 }
717 ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
718 &spu->problem_phys);
719 if (ret) {
720 pr_debug("spu_new: failed to map %s resource 1\n",
721 node->full_name);
722 goto out_unmap;
723 }
724 ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
725 NULL);
726 if (ret) {
727 pr_debug("spu_new: failed to map %s resource 2\n",
728 node->full_name);
729 goto out_unmap;
730 }
731
732 if (!firmware_has_feature(FW_FEATURE_LPAR))
733 ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
734 NULL);
735 if (ret) {
736 pr_debug("spu_new: failed to map %s resource 3\n",
737 node->full_name);
738 goto out_unmap;
739 }
740 pr_debug("spu_new: %s maps:\n", node->full_name);
741 pr_debug(" local store : 0x%016lx -> 0x%p\n",
742 spu->local_store_phys, spu->local_store);
743 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
744 spu->problem_phys, spu->problem);
745 pr_debug(" priv2 : 0x%p\n", spu->priv2);
746 pr_debug(" priv1 : 0x%p\n", spu->priv1);
747 544
748 return 0; 545 list_for_each_entry(spu, &spu_full_list, full_list)
546 sysdev_remove_file(&spu->sysdev, attr);
749 547
750out_unmap: 548 mutex_unlock(&spu_mutex);
751 spu_unmap(spu);
752out:
753 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
754 return ret;
755} 549}
550EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
756 551
757struct sysdev_class spu_sysdev_class = { 552void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
758 set_kset_name("spu")
759};
760
761static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
762{ 553{
763 struct spu *spu = container_of(sysdev, struct spu, sysdev); 554 struct spu *spu;
764 return sprintf(buf, "%d\n", spu->isrc); 555 mutex_lock(&spu_mutex);
765 556
766} 557 list_for_each_entry(spu, &spu_full_list, full_list)
767static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL); 558 sysfs_remove_group(&spu->sysdev.kobj, attrs);
768 559
769extern int attach_sysdev_to_node(struct sys_device *dev, int nid); 560 mutex_unlock(&spu_mutex);
561}
562EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
770 563
771static int spu_create_sysdev(struct spu *spu) 564static int spu_create_sysdev(struct spu *spu)
772{ 565{
@@ -781,21 +574,18 @@ static int spu_create_sysdev(struct spu *spu)
781 return ret; 574 return ret;
782 } 575 }
783 576
784 if (spu->isrc != 0) 577 sysfs_add_device_to_node(&spu->sysdev, spu->node);
785 sysdev_create_file(&spu->sysdev, &attr_isrc);
786 sysfs_add_device_to_node(&spu->sysdev, spu->nid);
787 578
788 return 0; 579 return 0;
789} 580}
790 581
791static void spu_destroy_sysdev(struct spu *spu) 582static void spu_destroy_sysdev(struct spu *spu)
792{ 583{
793 sysdev_remove_file(&spu->sysdev, &attr_isrc); 584 sysfs_remove_device_from_node(&spu->sysdev, spu->node);
794 sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
795 sysdev_unregister(&spu->sysdev); 585 sysdev_unregister(&spu->sysdev);
796} 586}
797 587
798static int __init create_spu(struct device_node *spe) 588static int __init create_spu(void *data)
799{ 589{
800 struct spu *spu; 590 struct spu *spu;
801 int ret; 591 int ret;
@@ -806,57 +596,37 @@ static int __init create_spu(struct device_node *spe)
806 if (!spu) 596 if (!spu)
807 goto out; 597 goto out;
808 598
809 spu->node = find_spu_node_id(spe); 599 spin_lock_init(&spu->register_lock);
810 if (spu->node >= MAX_NUMNODES) { 600 mutex_lock(&spu_mutex);
811 printk(KERN_WARNING "SPE %s on node %d ignored," 601 spu->number = number++;
812 " node number too big\n", spe->full_name, spu->node); 602 mutex_unlock(&spu_mutex);
813 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); 603
814 return -ENODEV; 604 ret = spu_create_spu(spu, data);
815 }
816 spu->nid = of_node_to_nid(spe);
817 if (spu->nid == -1)
818 spu->nid = 0;
819 605
820 ret = spu_map_device(spu, spe);
821 /* try old method */
822 if (ret)
823 ret = spu_map_device_old(spu, spe);
824 if (ret) 606 if (ret)
825 goto out_free; 607 goto out_free;
826 608
827 ret = spu_map_interrupts(spu, spe); 609 spu_mfc_sdr_setup(spu);
828 if (ret)
829 ret = spu_map_interrupts_old(spu, spe);
830 if (ret)
831 goto out_unmap;
832 spin_lock_init(&spu->register_lock);
833 spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
834 spu_mfc_sr1_set(spu, 0x33); 610 spu_mfc_sr1_set(spu, 0x33);
835 mutex_lock(&spu_mutex);
836
837 spu->number = number++;
838 ret = spu_request_irqs(spu); 611 ret = spu_request_irqs(spu);
839 if (ret) 612 if (ret)
840 goto out_unlock; 613 goto out_destroy;
841 614
842 ret = spu_create_sysdev(spu); 615 ret = spu_create_sysdev(spu);
843 if (ret) 616 if (ret)
844 goto out_free_irqs; 617 goto out_free_irqs;
845 618
619 mutex_lock(&spu_mutex);
846 list_add(&spu->list, &spu_list[spu->node]); 620 list_add(&spu->list, &spu_list[spu->node]);
621 list_add(&spu->full_list, &spu_full_list);
847 mutex_unlock(&spu_mutex); 622 mutex_unlock(&spu_mutex);
848 623
849 pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
850 spu->name, spu->isrc, spu->local_store,
851 spu->problem, spu->priv1, spu->priv2, spu->number);
852 goto out; 624 goto out;
853 625
854out_free_irqs: 626out_free_irqs:
855 spu_free_irqs(spu); 627 spu_free_irqs(spu);
856out_unlock: 628out_destroy:
857 mutex_unlock(&spu_mutex); 629 spu_destroy_spu(spu);
858out_unmap:
859 spu_unmap(spu);
860out_free: 630out_free:
861 kfree(spu); 631 kfree(spu);
862out: 632out:
@@ -866,10 +636,11 @@ out:
866static void destroy_spu(struct spu *spu) 636static void destroy_spu(struct spu *spu)
867{ 637{
868 list_del_init(&spu->list); 638 list_del_init(&spu->list);
639 list_del_init(&spu->full_list);
869 640
870 spu_destroy_sysdev(spu); 641 spu_destroy_sysdev(spu);
871 spu_free_irqs(spu); 642 spu_free_irqs(spu);
872 spu_unmap(spu); 643 spu_destroy_spu(spu);
873 kfree(spu); 644 kfree(spu);
874} 645}
875 646
@@ -890,9 +661,11 @@ module_exit(cleanup_spu_base);
890 661
891static int __init init_spu_base(void) 662static int __init init_spu_base(void)
892{ 663{
893 struct device_node *node;
894 int i, ret; 664 int i, ret;
895 665
666 if (!spu_management_ops)
667 return 0;
668
896 /* create sysdev class for spus */ 669 /* create sysdev class for spus */
897 ret = sysdev_class_register(&spu_sysdev_class); 670 ret = sysdev_class_register(&spu_sysdev_class);
898 if (ret) 671 if (ret)
@@ -901,17 +674,17 @@ static int __init init_spu_base(void)
901 for (i = 0; i < MAX_NUMNODES; i++) 674 for (i = 0; i < MAX_NUMNODES; i++)
902 INIT_LIST_HEAD(&spu_list[i]); 675 INIT_LIST_HEAD(&spu_list[i]);
903 676
904 ret = -ENODEV; 677 ret = spu_enumerate_spus(create_spu);
905 for (node = of_find_node_by_type(NULL, "spe"); 678
906 node; node = of_find_node_by_type(node, "spe")) { 679 if (ret) {
907 ret = create_spu(node); 680 printk(KERN_WARNING "%s: Error initializing spus\n",
908 if (ret) { 681 __FUNCTION__);
909 printk(KERN_WARNING "%s: Error initializing %s\n", 682 cleanup_spu_base();
910 __FUNCTION__, node->name); 683 return ret;
911 cleanup_spu_base();
912 break;
913 }
914 } 684 }
685
686 xmon_register_spus(&spu_full_list);
687
915 return ret; 688 return ret;
916} 689}
917module_init(init_spu_base); 690module_init(init_spu_base);
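
The hunks above replace the direct device-tree walk in spu_base.c with the spu_management_ops indirection: init_spu_base() now bails out early when no backend is registered, and create_spu() receives an opaque cookie that only the backend interprets. The sketch below shows, roughly, what an alternative backend would have to provide; hv_num_spes(), hv_spe_handle() and hv_setup_spe() are illustrative placeholders, not kernel APIs.

#include <asm/spu.h>
#include <asm/spu_priv1.h>

/* illustrative placeholders, not kernel APIs: */
extern int hv_num_spes(void);
extern void *hv_spe_handle(int i);
extern int hv_setup_spe(struct spu *spu, void *handle);

static int __init hv_enumerate_spus(int (*fn)(void *data))
{
	int i, ret = 0;

	for (i = 0; i < hv_num_spes(); i++) {
		/* hand an opaque per-SPE cookie to create_spu() */
		ret = fn(hv_spe_handle(i));
		if (ret)
			break;
	}
	return ret;
}

static int __init hv_create_spu(struct spu *spu, void *data)
{
	/* fill in spu->name, spu->node, spu->irqs[] etc. from the cookie */
	return hv_setup_spe(spu, data);
}

static int hv_destroy_spu(struct spu *spu)
{
	return 0;
}

const struct spu_management_ops spu_management_hv_ops = {
	.enumerate_spus	= hv_enumerate_spus,
	.create_spu	= hv_create_spu,
	.destroy_spu	= hv_destroy_spu,
};
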
diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
new file mode 100644
index 000000000000..6915b418ee73
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_coredump.c
@@ -0,0 +1,81 @@
1/*
2 * SPU core dump code
3 *
4 * (C) Copyright 2006 IBM Corp.
5 *
6 * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/file.h>
24#include <linux/module.h>
25#include <linux/syscalls.h>
26
27#include <asm/spu.h>
28
29static struct spu_coredump_calls spu_coredump_calls;
30static DEFINE_MUTEX(spu_coredump_mutex);
31
32int arch_notes_size(void)
33{
34 long ret;
35 struct module *owner = spu_coredump_calls.owner;
36
37 ret = -ENOSYS;
38 mutex_lock(&spu_coredump_mutex);
39 if (owner && try_module_get(owner)) {
40 ret = spu_coredump_calls.arch_notes_size();
41 module_put(owner);
42 }
43 mutex_unlock(&spu_coredump_mutex);
44 return ret;
45}
46
47void arch_write_notes(struct file *file)
48{
49 struct module *owner = spu_coredump_calls.owner;
50
51 mutex_lock(&spu_coredump_mutex);
52 if (owner && try_module_get(owner)) {
53 spu_coredump_calls.arch_write_notes(file);
54 module_put(owner);
55 }
56 mutex_unlock(&spu_coredump_mutex);
57}
58
59int register_arch_coredump_calls(struct spu_coredump_calls *calls)
60{
61 if (spu_coredump_calls.owner)
62 return -EBUSY;
63
64 mutex_lock(&spu_coredump_mutex);
65 spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
66 spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
67 spu_coredump_calls.owner = calls->owner;
68 mutex_unlock(&spu_coredump_mutex);
69 return 0;
70}
71EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
72
73void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
74{
75 BUG_ON(spu_coredump_calls.owner != calls->owner);
76
77 mutex_lock(&spu_coredump_mutex);
78 spu_coredump_calls.owner = NULL;
79 mutex_unlock(&spu_coredump_mutex);
80}
81EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
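
spu_coredump.c is only a multiplexer: the ELF core dumper calls arch_notes_size() and arch_write_notes(), which forward to whichever provider registered last. A minimal sketch of the provider side, assuming a module with callbacks of the signatures used above (spufs registers its own spufs_coredump_calls this way; that call site is outside this excerpt):

#include <linux/fs.h>
#include <linux/module.h>
#include <asm/spu.h>

static int my_notes_size(void)
{
	return 0;
}

static void my_write_notes(struct file *file)
{
}

static struct spu_coredump_calls my_coredump_calls = {
	.arch_notes_size  = my_notes_size,
	.arch_write_notes = my_write_notes,
	.owner            = THIS_MODULE,
};

static int __init my_init(void)
{
	/* fails with -EBUSY if another provider already registered */
	return register_arch_coredump_calls(&my_coredump_calls);
}

static void __exit my_exit(void)
{
	unregister_arch_coredump_calls(&my_coredump_calls);
}
module_init(my_init);
module_exit(my_exit);
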
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 71b69f0a1a48..a5de0430c56d 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -18,120 +18,498 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20 20
21#include <linux/interrupt.h>
22#include <linux/list.h>
21#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/wait.h>
27#include <linux/mm.h>
28#include <linux/io.h>
29#include <linux/mutex.h>
30#include <linux/device.h>
22 31
23#include <asm/io.h>
24#include <asm/spu.h> 32#include <asm/spu.h>
25#include <asm/spu_priv1.h> 33#include <asm/spu_priv1.h>
34#include <asm/firmware.h>
35#include <asm/prom.h>
26 36
27#include "interrupt.h" 37#include "interrupt.h"
38#include "spu_priv1_mmio.h"
39
40struct spu_pdata {
41 int nid;
42 struct device_node *devnode;
43 struct spu_priv1 __iomem *priv1;
44};
45
46static struct spu_pdata *spu_get_pdata(struct spu *spu)
47{
48 BUG_ON(!spu->pdata);
49 return spu->pdata;
50}
51
52struct device_node *spu_devnode(struct spu *spu)
53{
54 return spu_get_pdata(spu)->devnode;
55}
56
57EXPORT_SYMBOL_GPL(spu_devnode);
58
59static int __init find_spu_node_id(struct device_node *spe)
60{
61 const unsigned int *id;
62 struct device_node *cpu;
63 cpu = spe->parent->parent;
64 id = get_property(cpu, "node-id", NULL);
65 return id ? *id : 0;
66}
67
68static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
69 const char *prop)
70{
71 static DEFINE_MUTEX(add_spumem_mutex);
72
73 const struct address_prop {
74 unsigned long address;
75 unsigned int len;
76 } __attribute__((packed)) *p;
77 int proplen;
78
79 unsigned long start_pfn, nr_pages;
80 struct pglist_data *pgdata;
81 struct zone *zone;
82 int ret;
83
84 p = get_property(spe, prop, &proplen);
85 WARN_ON(proplen != sizeof (*p));
86
87 start_pfn = p->address >> PAGE_SHIFT;
88 nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
89
90 pgdata = NODE_DATA(spu_get_pdata(spu)->nid);
91 zone = pgdata->node_zones;
92
93 /* XXX rethink locking here */
94 mutex_lock(&add_spumem_mutex);
95 ret = __add_pages(zone, start_pfn, nr_pages);
96 mutex_unlock(&add_spumem_mutex);
97
98 return ret;
99}
100
101static void __iomem * __init map_spe_prop(struct spu *spu,
102 struct device_node *n, const char *name)
103{
104 const struct address_prop {
105 unsigned long address;
106 unsigned int len;
107 } __attribute__((packed)) *prop;
108
109 const void *p;
110 int proplen;
111 void __iomem *ret = NULL;
112 int err = 0;
113
114 p = get_property(n, name, &proplen);
115 if (proplen != sizeof (struct address_prop))
116 return NULL;
117
118 prop = p;
119
120 err = cell_spuprop_present(spu, n, name);
121 if (err && (err != -EEXIST))
122 goto out;
123
124 ret = ioremap(prop->address, prop->len);
125
126 out:
127 return ret;
128}
129
130static void spu_unmap(struct spu *spu)
131{
132 iounmap(spu->priv2);
133 iounmap(spu_get_pdata(spu)->priv1);
134 iounmap(spu->problem);
135 iounmap((__force u8 __iomem *)spu->local_store);
136}
137
138static int __init spu_map_interrupts_old(struct spu *spu,
139 struct device_node *np)
140{
141 unsigned int isrc;
142 const u32 *tmp;
143
144 /* Get the interrupt source unit from the device-tree */
145 tmp = get_property(np, "isrc", NULL);
146 if (!tmp)
147 return -ENODEV;
148 isrc = tmp[0];
149
150 /* Add the node number */
151 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
152
153 /* Now map interrupts of all 3 classes */
154 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
155 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
156 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
157
158 /* Right now, we only fail if class 2 failed */
159 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
160}
161
162static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
163{
164 const char *prop;
165 int ret;
166
167 ret = -ENODEV;
168 spu->name = get_property(node, "name", NULL);
169 if (!spu->name)
170 goto out;
171
172 prop = get_property(node, "local-store", NULL);
173 if (!prop)
174 goto out;
175 spu->local_store_phys = *(unsigned long *)prop;
176
177 /* we use local store as ram, not io memory */
178 spu->local_store = (void __force *)
179 map_spe_prop(spu, node, "local-store");
180 if (!spu->local_store)
181 goto out;
182
183 prop = get_property(node, "problem", NULL);
184 if (!prop)
185 goto out_unmap;
186 spu->problem_phys = *(unsigned long *)prop;
187
188 spu->problem = map_spe_prop(spu, node, "problem");
189 if (!spu->problem)
190 goto out_unmap;
191
192 spu_get_pdata(spu)->priv1 = map_spe_prop(spu, node, "priv1");
193
194 spu->priv2 = map_spe_prop(spu, node, "priv2");
195 if (!spu->priv2)
196 goto out_unmap;
197 ret = 0;
198 goto out;
199
200out_unmap:
201 spu_unmap(spu);
202out:
203 return ret;
204}
205
206static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
207{
208 struct of_irq oirq;
209 int ret;
210 int i;
211
212 for (i = 0; i < 3; i++) {
213 ret = of_irq_map_one(np, i, &oirq);
214 if (ret) {
215 pr_debug("spu_new: failed to get irq %d\n", i);
216 goto err;
217 }
218 ret = -EINVAL;
219 pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0],
220 oirq.controller->full_name);
221 spu->irqs[i] = irq_create_of_mapping(oirq.controller,
222 oirq.specifier, oirq.size);
223 if (spu->irqs[i] == NO_IRQ) {
224 pr_debug("spu_new: failed to map it!\n");
225 goto err;
226 }
227 }
228 return 0;
229
230err:
231 pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
232 spu->name);
233 for (; i >= 0; i--) {
234 if (spu->irqs[i] != NO_IRQ)
235 irq_dispose_mapping(spu->irqs[i]);
236 }
237 return ret;
238}
239
240static int spu_map_resource(struct device_node *node, int nr,
241 void __iomem** virt, unsigned long *phys)
242{
243 struct resource resource = { };
244 int ret;
245
246 ret = of_address_to_resource(node, nr, &resource);
247 if (ret)
248 goto out;
249
250 if (phys)
251 *phys = resource.start;
252 *virt = ioremap(resource.start, resource.end - resource.start);
253 if (!*virt)
254 ret = -EINVAL;
255
256out:
257 return ret;
258}
259
260static int __init spu_map_device(struct spu *spu, struct device_node *node)
261{
262 int ret = -ENODEV;
263 spu->name = get_property(node, "name", NULL);
264 if (!spu->name)
265 goto out;
266
267 ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
268 &spu->local_store_phys);
269 if (ret) {
270 pr_debug("spu_new: failed to map %s resource 0\n",
271 node->full_name);
272 goto out;
273 }
274 ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
275 &spu->problem_phys);
276 if (ret) {
277 pr_debug("spu_new: failed to map %s resource 1\n",
278 node->full_name);
279 goto out_unmap;
280 }
281 ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
282 NULL);
283 if (ret) {
284 pr_debug("spu_new: failed to map %s resource 2\n",
285 node->full_name);
286 goto out_unmap;
287 }
288 if (!firmware_has_feature(FW_FEATURE_LPAR))
289 ret = spu_map_resource(node, 3,
290 (void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
291 if (ret) {
292 pr_debug("spu_new: failed to map %s resource 3\n",
293 node->full_name);
294 goto out_unmap;
295 }
296 pr_debug("spu_new: %s maps:\n", node->full_name);
297 pr_debug(" local store : 0x%016lx -> 0x%p\n",
298 spu->local_store_phys, spu->local_store);
299 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
300 spu->problem_phys, spu->problem);
301 pr_debug(" priv2 : 0x%p\n", spu->priv2);
302 pr_debug(" priv1 : 0x%p\n",
303 spu_get_pdata(spu)->priv1);
304
305 return 0;
306
307out_unmap:
308 spu_unmap(spu);
309out:
310 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
311 return ret;
312}
313
314static int __init of_enumerate_spus(int (*fn)(void *data))
315{
316 int ret;
317 struct device_node *node;
318
319 ret = -ENODEV;
320 for (node = of_find_node_by_type(NULL, "spe");
321 node; node = of_find_node_by_type(node, "spe")) {
322 ret = fn(node);
323 if (ret) {
324 printk(KERN_WARNING "%s: Error initializing %s\n",
325 __FUNCTION__, node->name);
326 break;
327 }
328 }
329 return ret;
330}
331
332static int __init of_create_spu(struct spu *spu, void *data)
333{
334 int ret;
335 struct device_node *spe = (struct device_node *)data;
336
337 spu->pdata = kzalloc(sizeof(struct spu_pdata),
338 GFP_KERNEL);
339 if (!spu->pdata) {
340 ret = -ENOMEM;
341 goto out;
342 }
343
344 spu->node = find_spu_node_id(spe);
345 if (spu->node >= MAX_NUMNODES) {
346 printk(KERN_WARNING "SPE %s on node %d ignored,"
347 " node number too big\n", spe->full_name, spu->node);
348 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
349 ret = -ENODEV;
350 goto out_free;
351 }
352
353 spu_get_pdata(spu)->nid = of_node_to_nid(spe);
354 if (spu_get_pdata(spu)->nid == -1)
355 spu_get_pdata(spu)->nid = 0;
356
357 ret = spu_map_device(spu, spe);
358 /* try old method */
359 if (ret)
360 ret = spu_map_device_old(spu, spe);
361 if (ret)
362 goto out_free;
363
364 ret = spu_map_interrupts(spu, spe);
365 if (ret)
366 ret = spu_map_interrupts_old(spu, spe);
367 if (ret)
368 goto out_unmap;
369
370 spu_get_pdata(spu)->devnode = of_node_get(spe);
371
372 pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
373 spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
374 spu->priv2, spu->number);
375 goto out;
376
377out_unmap:
378 spu_unmap(spu);
379out_free:
380 kfree(spu->pdata);
381 spu->pdata = NULL;
382out:
383 return ret;
384}
385
386static int of_destroy_spu(struct spu *spu)
387{
388 spu_unmap(spu);
389 of_node_put(spu_get_pdata(spu)->devnode);
390 kfree(spu->pdata);
391 spu->pdata = NULL;
392 return 0;
393}
394
395const struct spu_management_ops spu_management_of_ops = {
396 .enumerate_spus = of_enumerate_spus,
397 .create_spu = of_create_spu,
398 .destroy_spu = of_destroy_spu,
399};
28 400
29static void int_mask_and(struct spu *spu, int class, u64 mask) 401static void int_mask_and(struct spu *spu, int class, u64 mask)
30{ 402{
31 u64 old_mask; 403 u64 old_mask;
32 404
33 old_mask = in_be64(&spu->priv1->int_mask_RW[class]); 405 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
34 out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); 406 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
407 old_mask & mask);
35} 408}
36 409
37static void int_mask_or(struct spu *spu, int class, u64 mask) 410static void int_mask_or(struct spu *spu, int class, u64 mask)
38{ 411{
39 u64 old_mask; 412 u64 old_mask;
40 413
41 old_mask = in_be64(&spu->priv1->int_mask_RW[class]); 414 old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
42 out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); 415 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
416 old_mask | mask);
43} 417}
44 418
45static void int_mask_set(struct spu *spu, int class, u64 mask) 419static void int_mask_set(struct spu *spu, int class, u64 mask)
46{ 420{
47 out_be64(&spu->priv1->int_mask_RW[class], mask); 421 out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
48} 422}
49 423
50static u64 int_mask_get(struct spu *spu, int class) 424static u64 int_mask_get(struct spu *spu, int class)
51{ 425{
52 return in_be64(&spu->priv1->int_mask_RW[class]); 426 return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
53} 427}
54 428
55static void int_stat_clear(struct spu *spu, int class, u64 stat) 429static void int_stat_clear(struct spu *spu, int class, u64 stat)
56{ 430{
57 out_be64(&spu->priv1->int_stat_RW[class], stat); 431 out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
58} 432}
59 433
60static u64 int_stat_get(struct spu *spu, int class) 434static u64 int_stat_get(struct spu *spu, int class)
61{ 435{
62 return in_be64(&spu->priv1->int_stat_RW[class]); 436 return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
63} 437}
64 438
65static void cpu_affinity_set(struct spu *spu, int cpu) 439static void cpu_affinity_set(struct spu *spu, int cpu)
66{ 440{
67 u64 target = iic_get_target_id(cpu); 441 u64 target = iic_get_target_id(cpu);
68 u64 route = target << 48 | target << 32 | target << 16; 442 u64 route = target << 48 | target << 32 | target << 16;
69 out_be64(&spu->priv1->int_route_RW, route); 443 out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
70} 444}
71 445
72static u64 mfc_dar_get(struct spu *spu) 446static u64 mfc_dar_get(struct spu *spu)
73{ 447{
74 return in_be64(&spu->priv1->mfc_dar_RW); 448 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
75} 449}
76 450
77static u64 mfc_dsisr_get(struct spu *spu) 451static u64 mfc_dsisr_get(struct spu *spu)
78{ 452{
79 return in_be64(&spu->priv1->mfc_dsisr_RW); 453 return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
80} 454}
81 455
82static void mfc_dsisr_set(struct spu *spu, u64 dsisr) 456static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
83{ 457{
84 out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); 458 out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
85} 459}
86 460
87static void mfc_sdr_set(struct spu *spu, u64 sdr) 461static void mfc_sdr_setup(struct spu *spu)
88{ 462{
89 out_be64(&spu->priv1->mfc_sdr_RW, sdr); 463 out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
90} 464}
91 465
92static void mfc_sr1_set(struct spu *spu, u64 sr1) 466static void mfc_sr1_set(struct spu *spu, u64 sr1)
93{ 467{
94 out_be64(&spu->priv1->mfc_sr1_RW, sr1); 468 out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
95} 469}
96 470
97static u64 mfc_sr1_get(struct spu *spu) 471static u64 mfc_sr1_get(struct spu *spu)
98{ 472{
99 return in_be64(&spu->priv1->mfc_sr1_RW); 473 return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
100} 474}
101 475
102static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id) 476static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
103{ 477{
104 out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); 478 out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
105} 479}
106 480
107static u64 mfc_tclass_id_get(struct spu *spu) 481static u64 mfc_tclass_id_get(struct spu *spu)
108{ 482{
109 return in_be64(&spu->priv1->mfc_tclass_id_RW); 483 return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
110} 484}
111 485
112static void tlb_invalidate(struct spu *spu) 486static void tlb_invalidate(struct spu *spu)
113{ 487{
114 out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); 488 out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
115} 489}
116 490
117static void resource_allocation_groupID_set(struct spu *spu, u64 id) 491static void resource_allocation_groupID_set(struct spu *spu, u64 id)
118{ 492{
119 out_be64(&spu->priv1->resource_allocation_groupID_RW, id); 493 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
494 id);
120} 495}
121 496
122static u64 resource_allocation_groupID_get(struct spu *spu) 497static u64 resource_allocation_groupID_get(struct spu *spu)
123{ 498{
124 return in_be64(&spu->priv1->resource_allocation_groupID_RW); 499 return in_be64(
500 &spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
125} 501}
126 502
127static void resource_allocation_enable_set(struct spu *spu, u64 enable) 503static void resource_allocation_enable_set(struct spu *spu, u64 enable)
128{ 504{
129 out_be64(&spu->priv1->resource_allocation_enable_RW, enable); 505 out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
506 enable);
130} 507}
131 508
132static u64 resource_allocation_enable_get(struct spu *spu) 509static u64 resource_allocation_enable_get(struct spu *spu)
133{ 510{
134 return in_be64(&spu->priv1->resource_allocation_enable_RW); 511 return in_be64(
512 &spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
135} 513}
136 514
137const struct spu_priv1_ops spu_priv1_mmio_ops = 515const struct spu_priv1_ops spu_priv1_mmio_ops =
@@ -146,7 +524,7 @@ const struct spu_priv1_ops spu_priv1_mmio_ops =
146 .mfc_dar_get = mfc_dar_get, 524 .mfc_dar_get = mfc_dar_get,
147 .mfc_dsisr_get = mfc_dsisr_get, 525 .mfc_dsisr_get = mfc_dsisr_get,
148 .mfc_dsisr_set = mfc_dsisr_set, 526 .mfc_dsisr_set = mfc_dsisr_set,
149 .mfc_sdr_set = mfc_sdr_set, 527 .mfc_sdr_setup = mfc_sdr_setup,
150 .mfc_sr1_set = mfc_sr1_set, 528 .mfc_sr1_set = mfc_sr1_set,
151 .mfc_sr1_get = mfc_sr1_get, 529 .mfc_sr1_get = mfc_sr1_get,
152 .mfc_tclass_id_set = mfc_tclass_id_set, 530 .mfc_tclass_id_set = mfc_tclass_id_set,
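
With the priv1 block moved into struct spu_pdata, code outside this file can no longer dereference the registers directly; it goes through spu_priv1_ops instead. Roughly, the inline wrappers in <asm/spu_priv1.h> look like the sketch below (a reconstruction of the call path, not a quote of the header):

extern const struct spu_priv1_ops *spu_priv1_ops;

static inline void spu_int_mask_set(struct spu *spu, int class, u64 mask)
{
	spu_priv1_ops->int_mask_set(spu, class, mask);
}

static inline u64 spu_int_mask_get(struct spu *spu, int class)
{
	return spu_priv1_ops->int_mask_get(spu, class);
}

/* common code (e.g. spu_base.c) then calls spu_int_mask_set(spu, 2, mask)
 * and is routed to int_mask_set() above when spu_priv1_mmio_ops is live */
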
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
new file mode 100644
index 000000000000..7b62bd1cc256
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
@@ -0,0 +1,26 @@
1/*
2 * spu hypervisor abstraction for direct hardware access.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef SPU_PRIV1_MMIO_H
22#define SPU_PRIV1_MMIO_H
23
24struct device_node *spu_devnode(struct spu *spu);
25
26#endif /* SPU_PRIV1_MMIO_H */
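
The new header exports only spu_devnode(), giving other cell platform code a way back to the firmware node behind an SPE now that struct spu itself no longer carries one. A hypothetical consumer; the "vicinity" property name is illustrative only:

#include <asm/prom.h>
#include <asm/spu.h>
#include "spu_priv1_mmio.h"

static const u32 *spe_vicinity(struct spu *spu, int *lenp)
{
	/* spu_devnode() resolves to spu_get_pdata(spu)->devnode */
	return get_property(spu_devnode(spu), "vicinity", lenp);
}
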
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index ecdfbb35f82e..472217d19faf 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,7 +1,7 @@
1obj-y += switch.o 1obj-y += switch.o
2 2
3obj-$(CONFIG_SPU_FS) += spufs.o 3obj-$(CONFIG_SPU_FS) += spufs.o
4spufs-y += inode.o file.o context.o syscalls.o 4spufs-y += inode.o file.o context.o syscalls.o coredump.o
5spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o 5spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
6 6
7# Rules to build switch.o with the help of SPU tool chain 7# Rules to build switch.o with the help of SPU tool chain
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 2d22cd59d6fc..1898f0d3a8b8 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -36,6 +36,7 @@
36#include <asm/io.h> 36#include <asm/io.h>
37#include <asm/spu.h> 37#include <asm/spu.h>
38#include <asm/spu_csa.h> 38#include <asm/spu_csa.h>
39#include <asm/spu_info.h>
39#include <asm/mmu_context.h> 40#include <asm/mmu_context.h>
40#include "spufs.h" 41#include "spufs.h"
41 42
@@ -267,6 +268,11 @@ static char *spu_backing_get_ls(struct spu_context *ctx)
267 return ctx->csa.lscsa->ls; 268 return ctx->csa.lscsa->ls;
268} 269}
269 270
271static u32 spu_backing_runcntl_read(struct spu_context *ctx)
272{
273 return ctx->csa.prob.spu_runcntl_RW;
274}
275
270static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) 276static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
271{ 277{
272 spin_lock(&ctx->csa.register_lock); 278 spin_lock(&ctx->csa.register_lock);
@@ -279,9 +285,26 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
279 spin_unlock(&ctx->csa.register_lock); 285 spin_unlock(&ctx->csa.register_lock);
280} 286}
281 287
282static void spu_backing_runcntl_stop(struct spu_context *ctx) 288static void spu_backing_master_start(struct spu_context *ctx)
289{
290 struct spu_state *csa = &ctx->csa;
291 u64 sr1;
292
293 spin_lock(&csa->register_lock);
294 sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
295 csa->priv1.mfc_sr1_RW = sr1;
296 spin_unlock(&csa->register_lock);
297}
298
299static void spu_backing_master_stop(struct spu_context *ctx)
283{ 300{
284 spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP); 301 struct spu_state *csa = &ctx->csa;
302 u64 sr1;
303
304 spin_lock(&csa->register_lock);
305 sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
306 csa->priv1.mfc_sr1_RW = sr1;
307 spin_unlock(&csa->register_lock);
285} 308}
286 309
287static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask, 310static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
@@ -345,8 +368,10 @@ struct spu_context_ops spu_backing_ops = {
345 .npc_write = spu_backing_npc_write, 368 .npc_write = spu_backing_npc_write,
346 .status_read = spu_backing_status_read, 369 .status_read = spu_backing_status_read,
347 .get_ls = spu_backing_get_ls, 370 .get_ls = spu_backing_get_ls,
371 .runcntl_read = spu_backing_runcntl_read,
348 .runcntl_write = spu_backing_runcntl_write, 372 .runcntl_write = spu_backing_runcntl_write,
349 .runcntl_stop = spu_backing_runcntl_stop, 373 .master_start = spu_backing_master_start,
374 .master_stop = spu_backing_master_stop,
350 .set_mfc_query = spu_backing_set_mfc_query, 375 .set_mfc_query = spu_backing_set_mfc_query,
351 .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus, 376 .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
352 .get_mfc_free_elements = spu_backing_get_mfc_free_elements, 377 .get_mfc_free_elements = spu_backing_get_mfc_free_elements,
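
spu_backing_master_start() and spu_backing_master_stop() flip MFC_STATE1_MASTER_RUN_CONTROL_MASK in the saved copy of SR1, so the change takes effect when the context is next loaded onto an SPE. The hw_ops counterpart (hw_ops.c is changed by this patch too, outside this excerpt) makes the same update on the live register, approximately:

static void spu_hw_master_start(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 sr1;

	spin_lock_irq(&spu->register_lock);
	sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	spu_mfc_sr1_set(spu, sr1);
	spin_unlock_irq(&spu->register_lock);
}
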
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 034cf6af53a2..0870009f56db 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -120,6 +120,33 @@ void spu_unmap_mappings(struct spu_context *ctx)
120 unmap_mapping_range(ctx->signal2, 0, 0x4000, 1); 120 unmap_mapping_range(ctx->signal2, 0, 0x4000, 1);
121} 121}
122 122
123int spu_acquire_exclusive(struct spu_context *ctx)
124{
125 int ret = 0;
126
127 down_write(&ctx->state_sema);
128 /* ctx is about to be freed, can't acquire any more */
129 if (!ctx->owner) {
130 ret = -EINVAL;
131 goto out;
132 }
133
134 if (ctx->state == SPU_STATE_SAVED) {
135 ret = spu_activate(ctx, 0);
136 if (ret)
137 goto out;
138 ctx->state = SPU_STATE_RUNNABLE;
139 } else {
140 /* We need to exclude userspace access to the context. */
141 spu_unmap_mappings(ctx);
142 }
143
144out:
145 if (ret)
146 up_write(&ctx->state_sema);
147 return ret;
148}
149
123int spu_acquire_runnable(struct spu_context *ctx) 150int spu_acquire_runnable(struct spu_context *ctx)
124{ 151{
125 int ret = 0; 152 int ret = 0;
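
Note the asymmetric locking in spu_acquire_exclusive(): on failure it drops ctx->state_sema itself, on success it returns with the semaphore held for writing and user mappings torn down. A caller therefore looks roughly like the sketch below; releasing with up_write() is inferred from the locking above rather than quoted from an actual caller.

static int frob_context(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire_exclusive(ctx);
	if (ret)
		return ret;	/* semaphore already dropped on error */

	/* userspace can neither map nor run the context here */

	up_write(&ctx->state_sema);
	return 0;
}
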
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
new file mode 100644
index 000000000000..26945c491f6b
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -0,0 +1,238 @@
1/*
2 * SPU core dump code
3 *
4 * (C) Copyright 2006 IBM Corp.
5 *
6 * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/elf.h>
24#include <linux/file.h>
25#include <linux/fs.h>
26#include <linux/list.h>
27#include <linux/module.h>
28#include <linux/syscalls.h>
29
30#include <asm/uaccess.h>
31
32#include "spufs.h"
33
34struct spufs_ctx_info {
35 struct list_head list;
36 int dfd;
37 int memsize; /* in bytes */
38 struct spu_context *ctx;
39};
40
41static LIST_HEAD(ctx_info_list);
42
43static ssize_t do_coredump_read(int num, struct spu_context *ctx, void __user *buffer,
44 size_t size, loff_t *off)
45{
46 u64 data;
47 int ret;
48
49 if (spufs_coredump_read[num].read)
50 return spufs_coredump_read[num].read(ctx, buffer, size, off);
51
52 data = spufs_coredump_read[num].get(ctx);
53 ret = copy_to_user(buffer, &data, 8);
54 return ret ? -EFAULT : 8;
55}
56
57/*
58 * These are the only things you should do on a core-file: use only these
59 * functions to write out all the necessary info.
60 */
61static int spufs_dump_write(struct file *file, const void *addr, int nr)
62{
63 return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
64}
65
66static int spufs_dump_seek(struct file *file, loff_t off)
67{
68 if (file->f_op->llseek) {
69 if (file->f_op->llseek(file, off, 0) != off)
70 return 0;
71 } else
72 file->f_pos = off;
73 return 1;
74}
75
76static void spufs_fill_memsize(struct spufs_ctx_info *ctx_info)
77{
78 struct spu_context *ctx;
79 unsigned long long lslr;
80
81 ctx = ctx_info->ctx;
82 lslr = ctx->csa.priv2.spu_lslr_RW;
83 ctx_info->memsize = lslr + 1;
84}
85
86static int spufs_ctx_note_size(struct spufs_ctx_info *ctx_info)
87{
88 int dfd, memsize, i, sz, total = 0;
89 char *name;
90 char fullname[80];
91
92 dfd = ctx_info->dfd;
93 memsize = ctx_info->memsize;
94
95 for (i = 0; spufs_coredump_read[i].name; i++) {
96 name = spufs_coredump_read[i].name;
97 sz = spufs_coredump_read[i].size;
98
99 sprintf(fullname, "SPU/%d/%s", dfd, name);
100
101 total += sizeof(struct elf_note);
102 total += roundup(strlen(fullname) + 1, 4);
103 if (!strcmp(name, "mem"))
104 total += roundup(memsize, 4);
105 else
106 total += roundup(sz, 4);
107 }
108
109 return total;
110}
111
112static int spufs_add_one_context(struct file *file, int dfd)
113{
114 struct spu_context *ctx;
115 struct spufs_ctx_info *ctx_info;
116 int size;
117
118 ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
119 if (ctx->flags & SPU_CREATE_NOSCHED)
120 return 0;
121
122 ctx_info = kzalloc(sizeof(*ctx_info), GFP_KERNEL);
123 if (unlikely(!ctx_info))
124 return -ENOMEM;
125
126 ctx_info->dfd = dfd;
127 ctx_info->ctx = ctx;
128
129 spufs_fill_memsize(ctx_info);
130
131 size = spufs_ctx_note_size(ctx_info);
132 list_add(&ctx_info->list, &ctx_info_list);
133 return size;
134}
135
136/*
137 * The additional architecture-specific notes for Cell are various
138 * context files in the spu context.
139 *
140 * This function iterates over all open file descriptors and sees
141 * if they are a directory in spufs. In that case we use spufs
142 * internal functionality to dump them without needing to actually
143 * open the files.
144 */
145static int spufs_arch_notes_size(void)
146{
147 struct fdtable *fdt = files_fdtable(current->files);
148 int size = 0, fd;
149
150 for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) {
151 if (FD_ISSET(fd, fdt->open_fds)) {
152 struct file *file = fcheck(fd);
153
154 if (file && file->f_op == &spufs_context_fops) {
155 int rval = spufs_add_one_context(file, fd);
156 if (rval < 0)
157 break;
158 size += rval;
159 }
160 }
161 }
162
163 return size;
164}
165
166static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
167 struct file *file)
168{
169 struct spu_context *ctx;
170 loff_t pos = 0;
171 int sz, dfd, rc, total = 0;
172 const int bufsz = 4096;
173 char *name;
174 char fullname[80], *buf;
175 struct elf_note en;
176
177 buf = kmalloc(bufsz, GFP_KERNEL);
178 if (!buf)
179 return;
180
181 dfd = ctx_info->dfd;
182 name = spufs_coredump_read[i].name;
183
184 if (!strcmp(name, "mem"))
185 sz = ctx_info->memsize;
186 else
187 sz = spufs_coredump_read[i].size;
188
189 ctx = ctx_info->ctx;
190 if (!ctx) {
191 return;
192 }
193
194 sprintf(fullname, "SPU/%d/%s", dfd, name);
195 en.n_namesz = strlen(fullname) + 1;
196 en.n_descsz = sz;
197 en.n_type = NT_SPU;
198
199 if (!spufs_dump_write(file, &en, sizeof(en)))
200 return;
201 if (!spufs_dump_write(file, fullname, en.n_namesz))
202 return;
203 if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
204 return;
205
206 do {
207 rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
208 if (rc > 0) {
209 if (!spufs_dump_write(file, buf, rc))
210 return;
211 total += rc;
212 }
213 } while (rc == bufsz && total < sz);
214
215 spufs_dump_seek(file, roundup((unsigned long)file->f_pos
216 - total + sz, 4));
217}
218
219static void spufs_arch_write_notes(struct file *file)
220{
221 int j;
222 struct spufs_ctx_info *ctx_info, *next;
223
224 list_for_each_entry_safe(ctx_info, next, &ctx_info_list, list) {
225 spu_acquire_saved(ctx_info->ctx);
226 for (j = 0; j < spufs_coredump_num_notes; j++)
227 spufs_arch_write_note(ctx_info, j, file);
228 spu_release(ctx_info->ctx);
229 list_del(&ctx_info->list);
230 kfree(ctx_info);
231 }
232}
233
234struct spu_coredump_calls spufs_coredump_calls = {
235 .arch_notes_size = spufs_arch_notes_size,
236 .arch_write_notes = spufs_arch_write_notes,
237 .owner = THIS_MODULE,
238};
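
do_coredump_read() drives a table of per-attribute readers. Its element type, reconstructed from the .name/.read/.get/.size accesses in this file (the real definition lives in spufs.h and the table at the bottom of file.c, both outside this excerpt), followed by a worked example of the note-size arithmetic in spufs_ctx_note_size():

struct spufs_coredump_reader {
	char *name;
	ssize_t (*read)(struct spu_context *ctx,
			char __user *buffer, size_t size, loff_t *pos);
	u64 (*get)(void *data);
	size_t size;
};

/* Worked example (illustrative values): a 2048-byte "regs" note for a
 * context on file descriptor 5 contributes
 *
 *	sizeof(struct elf_note)              = 12	(3 Elf_Words)
 *	roundup(strlen("SPU/5/regs") + 1, 4) = 12	(name + NUL, padded)
 *	roundup(2048, 4)                     = 2048	(note payload)
 *	                                      -----
 *	                                       2072 bytes
 */
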
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 533e2723e184..347eff56fcbd 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -32,13 +32,13 @@
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/semaphore.h> 33#include <asm/semaphore.h>
34#include <asm/spu.h> 34#include <asm/spu.h>
35#include <asm/spu_info.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36 37
37#include "spufs.h" 38#include "spufs.h"
38 39
39#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) 40#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
40 41
41
42static int 42static int
43spufs_mem_open(struct inode *inode, struct file *file) 43spufs_mem_open(struct inode *inode, struct file *file)
44{ 44{
@@ -51,18 +51,23 @@ spufs_mem_open(struct inode *inode, struct file *file)
51} 51}
52 52
53static ssize_t 53static ssize_t
54__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
55 size_t size, loff_t *pos)
56{
57 char *local_store = ctx->ops->get_ls(ctx);
58 return simple_read_from_buffer(buffer, size, pos, local_store,
59 LS_SIZE);
60}
61
62static ssize_t
54spufs_mem_read(struct file *file, char __user *buffer, 63spufs_mem_read(struct file *file, char __user *buffer,
55 size_t size, loff_t *pos) 64 size_t size, loff_t *pos)
56{ 65{
57 struct spu_context *ctx = file->private_data;
58 char *local_store;
59 int ret; 66 int ret;
67 struct spu_context *ctx = file->private_data;
60 68
61 spu_acquire(ctx); 69 spu_acquire(ctx);
62 70 ret = __spufs_mem_read(ctx, buffer, size, pos);
63 local_store = ctx->ops->get_ls(ctx);
64 ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
65
66 spu_release(ctx); 71 spu_release(ctx);
67 return ret; 72 return ret;
68} 73}
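
This hunk is the first instance of a pattern applied throughout file.c: each read path is split into a lockless __spufs_*_read() helper plus a thin wrapper that acquires the context, so the coredump code, which already holds the context via spu_acquire_saved(), can reuse the helper through the reader table. Schematically, with "foo" as a placeholder attribute:

static ssize_t __spufs_foo_read(struct spu_context *ctx,
		char __user *buffer, size_t size, loff_t *pos)
{
	/* trusts the caller to hold the context */
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
			&lscsa->decr, sizeof(lscsa->decr));
}

static ssize_t spufs_foo_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);		/* unschedule and save the context */
	ret = __spufs_foo_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return ret;
}
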
@@ -104,11 +109,11 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
104 109
105 if (ctx->state == SPU_STATE_SAVED) { 110 if (ctx->state == SPU_STATE_SAVED) {
106 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 111 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
107 & ~(_PAGE_NO_CACHE | _PAGE_GUARDED)); 112 & ~_PAGE_NO_CACHE);
108 page = vmalloc_to_page(ctx->csa.lscsa->ls + offset); 113 page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
109 } else { 114 } else {
110 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 115 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
111 | _PAGE_NO_CACHE | _PAGE_GUARDED); 116 | _PAGE_NO_CACHE);
112 page = pfn_to_page((ctx->spu->local_store_phys + offset) 117 page = pfn_to_page((ctx->spu->local_store_phys + offset)
113 >> PAGE_SHIFT); 118 >> PAGE_SHIFT);
114 } 119 }
@@ -131,7 +136,7 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
131 if (!(vma->vm_flags & VM_SHARED)) 136 if (!(vma->vm_flags & VM_SHARED))
132 return -EINVAL; 137 return -EINVAL;
133 138
134 /* FIXME: */ 139 vma->vm_flags |= VM_IO;
135 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 140 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
136 | _PAGE_NO_CACHE); 141 | _PAGE_NO_CACHE);
137 142
@@ -200,7 +205,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
200 if (!(vma->vm_flags & VM_SHARED)) 205 if (!(vma->vm_flags & VM_SHARED))
201 return -EINVAL; 206 return -EINVAL;
202 207
203 vma->vm_flags |= VM_RESERVED; 208 vma->vm_flags |= VM_IO;
204 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 209 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
205 | _PAGE_NO_CACHE | _PAGE_GUARDED); 210 | _PAGE_NO_CACHE | _PAGE_GUARDED);
206 211
@@ -261,18 +266,23 @@ spufs_regs_open(struct inode *inode, struct file *file)
261} 266}
262 267
263static ssize_t 268static ssize_t
269__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
270 size_t size, loff_t *pos)
271{
272 struct spu_lscsa *lscsa = ctx->csa.lscsa;
273 return simple_read_from_buffer(buffer, size, pos,
274 lscsa->gprs, sizeof lscsa->gprs);
275}
276
277static ssize_t
264spufs_regs_read(struct file *file, char __user *buffer, 278spufs_regs_read(struct file *file, char __user *buffer,
265 size_t size, loff_t *pos) 279 size_t size, loff_t *pos)
266{ 280{
267 struct spu_context *ctx = file->private_data;
268 struct spu_lscsa *lscsa = ctx->csa.lscsa;
269 int ret; 281 int ret;
282 struct spu_context *ctx = file->private_data;
270 283
271 spu_acquire_saved(ctx); 284 spu_acquire_saved(ctx);
272 285 ret = __spufs_regs_read(ctx, buffer, size, pos);
273 ret = simple_read_from_buffer(buffer, size, pos,
274 lscsa->gprs, sizeof lscsa->gprs);
275
276 spu_release(ctx); 286 spu_release(ctx);
277 return ret; 287 return ret;
278} 288}
@@ -307,18 +317,23 @@ static struct file_operations spufs_regs_fops = {
307}; 317};
308 318
309static ssize_t 319static ssize_t
320__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
321 size_t size, loff_t * pos)
322{
323 struct spu_lscsa *lscsa = ctx->csa.lscsa;
324 return simple_read_from_buffer(buffer, size, pos,
325 &lscsa->fpcr, sizeof(lscsa->fpcr));
326}
327
328static ssize_t
310spufs_fpcr_read(struct file *file, char __user * buffer, 329spufs_fpcr_read(struct file *file, char __user * buffer,
311 size_t size, loff_t * pos) 330 size_t size, loff_t * pos)
312{ 331{
313 struct spu_context *ctx = file->private_data;
314 struct spu_lscsa *lscsa = ctx->csa.lscsa;
315 int ret; 332 int ret;
333 struct spu_context *ctx = file->private_data;
316 334
317 spu_acquire_saved(ctx); 335 spu_acquire_saved(ctx);
318 336 ret = __spufs_fpcr_read(ctx, buffer, size, pos);
319 ret = simple_read_from_buffer(buffer, size, pos,
320 &lscsa->fpcr, sizeof(lscsa->fpcr));
321
322 spu_release(ctx); 337 spu_release(ctx);
323 return ret; 338 return ret;
324} 339}
@@ -718,23 +733,41 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
718 return nonseekable_open(inode, file); 733 return nonseekable_open(inode, file);
719} 734}
720 735
721static ssize_t spufs_signal1_read(struct file *file, char __user *buf, 736static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
722 size_t len, loff_t *pos) 737 size_t len, loff_t *pos)
723{ 738{
724 struct spu_context *ctx = file->private_data; 739 int ret = 0;
725 u32 data; 740 u32 data;
726 741
727 if (len < 4) 742 if (len < 4)
728 return -EINVAL; 743 return -EINVAL;
729 744
730 spu_acquire(ctx); 745 if (ctx->csa.spu_chnlcnt_RW[3]) {
731 data = ctx->ops->signal1_read(ctx); 746 data = ctx->csa.spu_chnldata_RW[3];
732 spu_release(ctx); 747 ret = 4;
748 }
749
750 if (!ret)
751 goto out;
733 752
734 if (copy_to_user(buf, &data, 4)) 753 if (copy_to_user(buf, &data, 4))
735 return -EFAULT; 754 return -EFAULT;
736 755
737 return 4; 756out:
757 return ret;
758}
759
760static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
761 size_t len, loff_t *pos)
762{
763 int ret;
764 struct spu_context *ctx = file->private_data;
765
766 spu_acquire_saved(ctx);
767 ret = __spufs_signal1_read(ctx, buf, len, pos);
768 spu_release(ctx);
769
770 return ret;
738} 771}
739 772
740static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, 773static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
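
The signal1 rework above also changes what userspace sees: the read is now satisfied from the saved channel state (channel 3 count and data), returning 4 bytes when a signal is pending and 0 when none is. From userspace that looks roughly like this; the context path is illustrative:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t data;
	int fd = open("/spu/my_ctx/signal1", O_RDONLY);

	if (fd < 0)
		return 1;
	switch (read(fd, &data, sizeof(data))) {
	case 4:
		printf("pending signal: 0x%x\n", data);
		break;
	case 0:
		printf("no signal pending\n");
		break;
	default:
		perror("read");
	}
	close(fd);
	return 0;
}
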
@@ -782,7 +815,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
782 if (!(vma->vm_flags & VM_SHARED)) 815 if (!(vma->vm_flags & VM_SHARED))
783 return -EINVAL; 816 return -EINVAL;
784 817
785 vma->vm_flags |= VM_RESERVED; 818 vma->vm_flags |= VM_IO;
786 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 819 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
787 | _PAGE_NO_CACHE | _PAGE_GUARDED); 820 | _PAGE_NO_CACHE | _PAGE_GUARDED);
788 821
@@ -807,25 +840,41 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
807 return nonseekable_open(inode, file); 840 return nonseekable_open(inode, file);
808} 841}
809 842
810static ssize_t spufs_signal2_read(struct file *file, char __user *buf, 843static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
811 size_t len, loff_t *pos) 844 size_t len, loff_t *pos)
812{ 845{
813 struct spu_context *ctx; 846 int ret = 0;
814 u32 data; 847 u32 data;
815 848
816 ctx = file->private_data;
817
818 if (len < 4) 849 if (len < 4)
819 return -EINVAL; 850 return -EINVAL;
820 851
821 spu_acquire(ctx); 852 if (ctx->csa.spu_chnlcnt_RW[4]) {
822 data = ctx->ops->signal2_read(ctx); 853 data = ctx->csa.spu_chnldata_RW[4];
823 spu_release(ctx); 854 ret = 4;
855 }
856
857 if (!ret)
858 goto out;
824 859
825 if (copy_to_user(buf, &data, 4)) 860 if (copy_to_user(buf, &data, 4))
826 return -EFAULT; 861 return -EFAULT;
827 862
828 return 4; 863out:
864 return ret;
865}
866
867static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
868 size_t len, loff_t *pos)
869{
870 struct spu_context *ctx = file->private_data;
871 int ret;
872
873 spu_acquire_saved(ctx);
874 ret = __spufs_signal2_read(ctx, buf, len, pos);
875 spu_release(ctx);
876
877 return ret;
829} 878}
830 879
831static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, 880static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
@@ -874,8 +923,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
874 if (!(vma->vm_flags & VM_SHARED)) 923 if (!(vma->vm_flags & VM_SHARED))
875 return -EINVAL; 924 return -EINVAL;
876 925
877 /* FIXME: */ 926 vma->vm_flags |= VM_IO;
878 vma->vm_flags |= VM_RESERVED;
879 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 927 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
880 | _PAGE_NO_CACHE | _PAGE_GUARDED); 928 | _PAGE_NO_CACHE | _PAGE_GUARDED);
881 929
@@ -902,13 +950,19 @@ static void spufs_signal1_type_set(void *data, u64 val)
902 spu_release(ctx); 950 spu_release(ctx);
903} 951}
904 952
953static u64 __spufs_signal1_type_get(void *data)
954{
955 struct spu_context *ctx = data;
956 return ctx->ops->signal1_type_get(ctx);
957}
958
905static u64 spufs_signal1_type_get(void *data) 959static u64 spufs_signal1_type_get(void *data)
906{ 960{
907 struct spu_context *ctx = data; 961 struct spu_context *ctx = data;
908 u64 ret; 962 u64 ret;
909 963
910 spu_acquire(ctx); 964 spu_acquire(ctx);
911 ret = ctx->ops->signal1_type_get(ctx); 965 ret = __spufs_signal1_type_get(data);
912 spu_release(ctx); 966 spu_release(ctx);
913 967
914 return ret; 968 return ret;
@@ -925,13 +979,19 @@ static void spufs_signal2_type_set(void *data, u64 val)
925 spu_release(ctx); 979 spu_release(ctx);
926} 980}
927 981
982static u64 __spufs_signal2_type_get(void *data)
983{
984 struct spu_context *ctx = data;
985 return ctx->ops->signal2_type_get(ctx);
986}
987
928static u64 spufs_signal2_type_get(void *data) 988static u64 spufs_signal2_type_get(void *data)
929{ 989{
930 struct spu_context *ctx = data; 990 struct spu_context *ctx = data;
931 u64 ret; 991 u64 ret;
932 992
933 spu_acquire(ctx); 993 spu_acquire(ctx);
934 ret = ctx->ops->signal2_type_get(ctx); 994 ret = __spufs_signal2_type_get(data);
935 spu_release(ctx); 995 spu_release(ctx);
936 996
937 return ret; 997 return ret;
@@ -958,7 +1018,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
958 if (!(vma->vm_flags & VM_SHARED)) 1018 if (!(vma->vm_flags & VM_SHARED))
959 return -EINVAL; 1019 return -EINVAL;
960 1020
961 vma->vm_flags |= VM_RESERVED; 1021 vma->vm_flags |= VM_IO;
962 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1022 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
963 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1023 | _PAGE_NO_CACHE | _PAGE_GUARDED);
964 1024
@@ -1000,7 +1060,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1000 if (!(vma->vm_flags & VM_SHARED)) 1060 if (!(vma->vm_flags & VM_SHARED))
1001 return -EINVAL; 1061 return -EINVAL;
1002 1062
1003 vma->vm_flags |= VM_RESERVED; 1063 vma->vm_flags |= VM_IO;
1004 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1064 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1005 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1065 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1006 1066
@@ -1041,7 +1101,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1041 if (!(vma->vm_flags & VM_SHARED)) 1101 if (!(vma->vm_flags & VM_SHARED))
1042 return -EINVAL; 1102 return -EINVAL;
1043 1103
1044 vma->vm_flags |= VM_RESERVED; 1104 vma->vm_flags |= VM_IO;
1045 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 1105 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1046 | _PAGE_NO_CACHE | _PAGE_GUARDED); 1106 | _PAGE_NO_CACHE | _PAGE_GUARDED);
1047 1107
@@ -1265,6 +1325,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1265 goto out; 1325 goto out;
1266 1326
1267 ctx->tagwait |= 1 << cmd.tag; 1327 ctx->tagwait |= 1 << cmd.tag;
1328 ret = size;
1268 1329
1269out: 1330out:
1270 return ret; 1331 return ret;
@@ -1360,7 +1421,8 @@ static u64 spufs_npc_get(void *data)
1360 spu_release(ctx); 1421 spu_release(ctx);
1361 return ret; 1422 return ret;
1362} 1423}
1363DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n") 1424DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1425 "0x%llx\n")
1364 1426
1365static void spufs_decr_set(void *data, u64 val) 1427static void spufs_decr_set(void *data, u64 val)
1366{ 1428{
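
The only change in this hunk and in several that follow is the format string passed to DEFINE_SIMPLE_ATTRIBUTE(), which controls how the attribute prints on read; with "0x%llx\n" the files now read back as explicit hex. A sketch of the mechanism (simple_attr from fs/libfs.c), with an illustrative attribute:

static u64 example_get(void *data)
{
	return *(u64 *)data;	/* illustrative backing value */
}

static void example_set(void *data, u64 val)
{
	*(u64 *)data = val;
}

DEFINE_SIMPLE_ATTRIBUTE(example_ops, example_get, example_set, "0x%llx\n");
/* read()  -> value formatted with "0x%llx\n", e.g. "0x12a8\n"
 * write() -> parsed with base 0, so "0x12a8" and "4776" both work */
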
@@ -1371,18 +1433,24 @@ static void spufs_decr_set(void *data, u64 val)
1371 spu_release(ctx); 1433 spu_release(ctx);
1372} 1434}
1373 1435
1374static u64 spufs_decr_get(void *data) 1436static u64 __spufs_decr_get(void *data)
1375{ 1437{
1376 struct spu_context *ctx = data; 1438 struct spu_context *ctx = data;
1377 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1439 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1440 return lscsa->decr.slot[0];
1441}
1442
1443static u64 spufs_decr_get(void *data)
1444{
1445 struct spu_context *ctx = data;
1378 u64 ret; 1446 u64 ret;
1379 spu_acquire_saved(ctx); 1447 spu_acquire_saved(ctx);
1380 ret = lscsa->decr.slot[0]; 1448 ret = __spufs_decr_get(data);
1381 spu_release(ctx); 1449 spu_release(ctx);
1382 return ret; 1450 return ret;
1383} 1451}
1384DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, 1452DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1385 "%llx\n") 1453 "0x%llx\n")
1386 1454
1387static void spufs_decr_status_set(void *data, u64 val) 1455static void spufs_decr_status_set(void *data, u64 val)
1388{ 1456{
@@ -1393,62 +1461,76 @@ static void spufs_decr_status_set(void *data, u64 val)
1393 spu_release(ctx); 1461 spu_release(ctx);
1394} 1462}
1395 1463
1396static u64 spufs_decr_status_get(void *data) 1464static u64 __spufs_decr_status_get(void *data)
1397{ 1465{
1398 struct spu_context *ctx = data; 1466 struct spu_context *ctx = data;
1399 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1467 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1468 return lscsa->decr_status.slot[0];
1469}
1470
1471static u64 spufs_decr_status_get(void *data)
1472{
1473 struct spu_context *ctx = data;
1400 u64 ret; 1474 u64 ret;
1401 spu_acquire_saved(ctx); 1475 spu_acquire_saved(ctx);
1402 ret = lscsa->decr_status.slot[0]; 1476 ret = __spufs_decr_status_get(data);
1403 spu_release(ctx); 1477 spu_release(ctx);
1404 return ret; 1478 return ret;
1405} 1479}
1406DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, 1480DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1407 spufs_decr_status_set, "%llx\n") 1481 spufs_decr_status_set, "0x%llx\n")
1408 1482
1409static void spufs_spu_tag_mask_set(void *data, u64 val) 1483static void spufs_event_mask_set(void *data, u64 val)
1410{ 1484{
1411 struct spu_context *ctx = data; 1485 struct spu_context *ctx = data;
1412 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1486 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1413 spu_acquire_saved(ctx); 1487 spu_acquire_saved(ctx);
1414 lscsa->tag_mask.slot[0] = (u32) val; 1488 lscsa->event_mask.slot[0] = (u32) val;
1415 spu_release(ctx); 1489 spu_release(ctx);
1416} 1490}
1417 1491
1418static u64 spufs_spu_tag_mask_get(void *data) 1492static u64 __spufs_event_mask_get(void *data)
1419{ 1493{
1420 struct spu_context *ctx = data; 1494 struct spu_context *ctx = data;
1421 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1495 struct spu_lscsa *lscsa = ctx->csa.lscsa;
1496 return lscsa->event_mask.slot[0];
1497}
1498
1499static u64 spufs_event_mask_get(void *data)
1500{
1501 struct spu_context *ctx = data;
1422 u64 ret; 1502 u64 ret;
1423 spu_acquire_saved(ctx); 1503 spu_acquire_saved(ctx);
1424 ret = lscsa->tag_mask.slot[0]; 1504 ret = __spufs_event_mask_get(data);
1425 spu_release(ctx); 1505 spu_release(ctx);
1426 return ret; 1506 return ret;
1427} 1507}
1428DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get, 1508DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1429 spufs_spu_tag_mask_set, "%llx\n") 1509 spufs_event_mask_set, "0x%llx\n")
1430 1510
1431static void spufs_event_mask_set(void *data, u64 val) 1511static u64 __spufs_event_status_get(void *data)
1432{ 1512{
1433 struct spu_context *ctx = data; 1513 struct spu_context *ctx = data;
1434 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1514 struct spu_state *state = &ctx->csa;
1435 spu_acquire_saved(ctx); 1515 u64 stat;
1436 lscsa->event_mask.slot[0] = (u32) val; 1516 stat = state->spu_chnlcnt_RW[0];
1437 spu_release(ctx); 1517 if (stat)
1518 return state->spu_chnldata_RW[0];
1519 return 0;
1438} 1520}
1439 1521
1440static u64 spufs_event_mask_get(void *data) 1522static u64 spufs_event_status_get(void *data)
1441{ 1523{
1442 struct spu_context *ctx = data; 1524 struct spu_context *ctx = data;
1443 struct spu_lscsa *lscsa = ctx->csa.lscsa; 1525 u64 ret = 0;
1444 u64 ret; 1526
1445 spu_acquire_saved(ctx); 1527 spu_acquire_saved(ctx);
1446 ret = lscsa->event_mask.slot[0]; 1528 ret = __spufs_event_status_get(data);
1447 spu_release(ctx); 1529 spu_release(ctx);
1448 return ret; 1530 return ret;
1449} 1531}
1450DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, 1532DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1451 spufs_event_mask_set, "%llx\n") 1533 NULL, "0x%llx\n")
1452 1534
1453static void spufs_srr0_set(void *data, u64 val) 1535static void spufs_srr0_set(void *data, u64 val)
1454{ 1536{
@@ -1470,7 +1552,7 @@ static u64 spufs_srr0_get(void *data)
1470 return ret; 1552 return ret;
1471} 1553}
1472DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, 1554DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1473 "%llx\n") 1555 "0x%llx\n")
1474 1556
1475static u64 spufs_id_get(void *data) 1557static u64 spufs_id_get(void *data)
1476{ 1558{
@@ -1488,12 +1570,18 @@ static u64 spufs_id_get(void *data)
1488} 1570}
1489DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n") 1571DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1490 1572
1491static u64 spufs_object_id_get(void *data) 1573static u64 __spufs_object_id_get(void *data)
1492{ 1574{
1493 struct spu_context *ctx = data; 1575 struct spu_context *ctx = data;
1494 return ctx->object_id; 1576 return ctx->object_id;
1495} 1577}
1496 1578
1579static u64 spufs_object_id_get(void *data)
1580{
1581 /* FIXME: Should there really be no locking here? */
1582 return __spufs_object_id_get(data);
1583}
1584
1497static void spufs_object_id_set(void *data, u64 id) 1585static void spufs_object_id_set(void *data, u64 id)
1498{ 1586{
1499 struct spu_context *ctx = data; 1587 struct spu_context *ctx = data;
@@ -1503,6 +1591,250 @@ static void spufs_object_id_set(void *data, u64 id)
1503DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, 1591DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1504 spufs_object_id_set, "0x%llx\n"); 1592 spufs_object_id_set, "0x%llx\n");
1505 1593
1594static u64 __spufs_lslr_get(void *data)
1595{
1596 struct spu_context *ctx = data;
1597 return ctx->csa.priv2.spu_lslr_RW;
1598}
1599
1600static u64 spufs_lslr_get(void *data)
1601{
1602 struct spu_context *ctx = data;
1603 u64 ret;
1604
1605 spu_acquire_saved(ctx);
1606 ret = __spufs_lslr_get(data);
1607 spu_release(ctx);
1608
1609 return ret;
1610}
1611DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1612
1613static int spufs_info_open(struct inode *inode, struct file *file)
1614{
1615 struct spufs_inode_info *i = SPUFS_I(inode);
1616 struct spu_context *ctx = i->i_ctx;
1617 file->private_data = ctx;
1618 return 0;
1619}
1620
1621static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1622 char __user *buf, size_t len, loff_t *pos)
1623{
1624 u32 mbox_stat;
 1625 u32 data = 0;
1626
1627 mbox_stat = ctx->csa.prob.mb_stat_R;
1628 if (mbox_stat & 0x0000ff) {
1629 data = ctx->csa.prob.pu_mb_R;
1630 }
1631
1632 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1633}
1634
1635static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1636 size_t len, loff_t *pos)
1637{
1638 int ret;
1639 struct spu_context *ctx = file->private_data;
1640
1641 if (!access_ok(VERIFY_WRITE, buf, len))
1642 return -EFAULT;
1643
1644 spu_acquire_saved(ctx);
1645 spin_lock(&ctx->csa.register_lock);
1646 ret = __spufs_mbox_info_read(ctx, buf, len, pos);
1647 spin_unlock(&ctx->csa.register_lock);
1648 spu_release(ctx);
1649
1650 return ret;
1651}
1652
1653static struct file_operations spufs_mbox_info_fops = {
1654 .open = spufs_info_open,
1655 .read = spufs_mbox_info_read,
1656 .llseek = generic_file_llseek,
1657};
1658
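The new info files read the saved register image in ctx->csa rather than the live SPU, under both the context lock and csa.register_lock, and they hand raw binary words back via simple_read_from_buffer(). A minimal userspace sketch of consuming mbox_info, with a placeholder context path:

    /* Sketch: read the 4-byte binary snapshot from mbox_info.
     * "/spu/ctx" stands in for a real spufs context directory. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint32_t mbox_data;
        int fd = open("/spu/ctx/mbox_info", O_RDONLY);

        if (fd < 0)
            return 1;
        if (read(fd, &mbox_data, sizeof(mbox_data)) == sizeof(mbox_data))
            printf("saved PU mailbox word: 0x%08x\n", mbox_data);
        close(fd);
        return 0;
    }
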
1659static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1660 char __user *buf, size_t len, loff_t *pos)
1661{
1662 u32 ibox_stat;
 1663 u32 data = 0;
1664
1665 ibox_stat = ctx->csa.prob.mb_stat_R;
1666 if (ibox_stat & 0xff0000) {
1667 data = ctx->csa.priv2.puint_mb_R;
1668 }
1669
1670 return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1671}
1672
1673static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1674 size_t len, loff_t *pos)
1675{
1676 struct spu_context *ctx = file->private_data;
1677 int ret;
1678
1679 if (!access_ok(VERIFY_WRITE, buf, len))
1680 return -EFAULT;
1681
1682 spu_acquire_saved(ctx);
1683 spin_lock(&ctx->csa.register_lock);
1684 ret = __spufs_ibox_info_read(ctx, buf, len, pos);
1685 spin_unlock(&ctx->csa.register_lock);
1686 spu_release(ctx);
1687
1688 return ret;
1689}
1690
1691static struct file_operations spufs_ibox_info_fops = {
1692 .open = spufs_info_open,
1693 .read = spufs_ibox_info_read,
1694 .llseek = generic_file_llseek,
1695};
1696
1697static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1698 char __user *buf, size_t len, loff_t *pos)
1699{
1700 int i, cnt;
1701 u32 data[4];
1702 u32 wbox_stat;
1703
1704 wbox_stat = ctx->csa.prob.mb_stat_R;
1705 cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1706 for (i = 0; i < cnt; i++) {
1707 data[i] = ctx->csa.spu_mailbox_data[i];
1708 }
1709
1710 return simple_read_from_buffer(buf, len, pos, &data,
1711 cnt * sizeof(u32));
1712}
1713
1714static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
1715 size_t len, loff_t *pos)
1716{
1717 struct spu_context *ctx = file->private_data;
1718 int ret;
1719
1720 if (!access_ok(VERIFY_WRITE, buf, len))
1721 return -EFAULT;
1722
1723 spu_acquire_saved(ctx);
1724 spin_lock(&ctx->csa.register_lock);
1725 ret = __spufs_wbox_info_read(ctx, buf, len, pos);
1726 spin_unlock(&ctx->csa.register_lock);
1727 spu_release(ctx);
1728
1729 return ret;
1730}
1731
1732static struct file_operations spufs_wbox_info_fops = {
1733 .open = spufs_info_open,
1734 .read = spufs_wbox_info_read,
1735 .llseek = generic_file_llseek,
1736};
1737
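All three readers decode fields of the same saved mb_stat_R word. Going by the masks used above: bits 0-7 hold the PU mailbox count, bits 8-15 the number of free SPU write-mailbox slots out of a queue of four, and bits 16-23 the interrupting-mailbox count. A small helper making that decoding explicit, as a sketch:

    /* Sketch: decode mb_stat_R the way the three readers above do. */
    #include <stdint.h>

    struct mb_stat {
        unsigned int mbox_count;    /* bits  0-7:  PU mailbox entries */
        unsigned int wbox_queued;   /* 4 minus free slots, bits 8-15  */
        unsigned int ibox_count;    /* bits 16-23: interrupt mailbox  */
    };

    static struct mb_stat decode_mb_stat(uint32_t stat)
    {
        struct mb_stat s;

        s.mbox_count  = stat & 0x0000ff;
        s.wbox_queued = 4 - ((stat & 0x00ff00) >> 8);
        s.ibox_count  = (stat & 0xff0000) >> 16;
        return s;
    }
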
1738static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1739 char __user *buf, size_t len, loff_t *pos)
1740{
1741 struct spu_dma_info info;
1742 struct mfc_cq_sr *qp, *spuqp;
1743 int i;
1744
1745 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1746 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1747 info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1748 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1749 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1750 for (i = 0; i < 16; i++) {
1751 qp = &info.dma_info_command_data[i];
1752 spuqp = &ctx->csa.priv2.spuq[i];
1753
1754 qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1755 qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1756 qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1757 qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1758 }
1759
1760 return simple_read_from_buffer(buf, len, pos, &info,
1761 sizeof info);
1762}
1763
1764static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
1765 size_t len, loff_t *pos)
1766{
1767 struct spu_context *ctx = file->private_data;
1768 int ret;
1769
1770 if (!access_ok(VERIFY_WRITE, buf, len))
1771 return -EFAULT;
1772
1773 spu_acquire_saved(ctx);
1774 spin_lock(&ctx->csa.register_lock);
1775 ret = __spufs_dma_info_read(ctx, buf, len, pos);
1776 spin_unlock(&ctx->csa.register_lock);
1777 spu_release(ctx);
1778
1779 return ret;
1780}
1781
1782static struct file_operations spufs_dma_info_fops = {
1783 .open = spufs_info_open,
1784 .read = spufs_dma_info_read,
1785};
1786
1787static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1788 char __user *buf, size_t len, loff_t *pos)
1789{
1790 struct spu_proxydma_info info;
1791 struct mfc_cq_sr *qp, *puqp;
1792 int ret = sizeof info;
1793 int i;
1794
1795 if (len < ret)
1796 return -EINVAL;
1797
1798 if (!access_ok(VERIFY_WRITE, buf, len))
1799 return -EFAULT;
1800
1801 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1802 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1803 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1804 for (i = 0; i < 8; i++) {
1805 qp = &info.proxydma_info_command_data[i];
1806 puqp = &ctx->csa.priv2.puq[i];
1807
1808 qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1809 qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1810 qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1811 qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1812 }
1813
1814 return simple_read_from_buffer(buf, len, pos, &info,
1815 sizeof info);
1816}
1817
1818static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
1819 size_t len, loff_t *pos)
1820{
1821 struct spu_context *ctx = file->private_data;
1822 int ret;
1823
1824 spu_acquire_saved(ctx);
1825 spin_lock(&ctx->csa.register_lock);
1826 ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
1827 spin_unlock(&ctx->csa.register_lock);
1828 spu_release(ctx);
1829
1830 return ret;
1831}
1832
1833static struct file_operations spufs_proxydma_info_fops = {
1834 .open = spufs_info_open,
1835 .read = spufs_proxydma_info_read,
1836};
1837
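Both DMA readers assemble a fixed-layout struct (spu_dma_info, spu_proxydma_info, declared in the new asm/spu_info.h) from the saved MFC command queues; the proxydma variant additionally rejects buffers shorter than the struct up front. A userspace sketch of reading the proxy snapshot, with the layout inferred from the fields copied above (3 u64 header words plus 8 queue entries of 4 u64 words each, i.e. the 35 * 8 bytes listed in the coredump table below):

    /* Sketch: read the proxydma_info snapshot; layout is an inference
     * from the copies above, not taken verbatim from asm/spu_info.h. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct proxydma_snapshot {
        uint64_t type, mask, status;
        uint64_t queue[8][4];
    };

    int main(void)
    {
        struct proxydma_snapshot info;
        int fd = open("/spu/ctx/proxydma_info", O_RDONLY); /* placeholder */

        if (fd < 0 || read(fd, &info, sizeof(info)) != sizeof(info))
            return 1;
        printf("proxy DMA tag status: 0x%016llx\n",
               (unsigned long long)info.status);
        close(fd);
        return 0;
    }
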
1506struct tree_descr spufs_dir_contents[] = { 1838struct tree_descr spufs_dir_contents[] = {
1507 { "mem", &spufs_mem_fops, 0666, }, 1839 { "mem", &spufs_mem_fops, 0666, },
1508 { "regs", &spufs_regs_fops, 0666, }, 1840 { "regs", &spufs_regs_fops, 0666, },
@@ -1516,18 +1848,70 @@ struct tree_descr spufs_dir_contents[] = {
1516 { "signal2", &spufs_signal2_fops, 0666, }, 1848 { "signal2", &spufs_signal2_fops, 0666, },
1517 { "signal1_type", &spufs_signal1_type, 0666, }, 1849 { "signal1_type", &spufs_signal1_type, 0666, },
1518 { "signal2_type", &spufs_signal2_type, 0666, }, 1850 { "signal2_type", &spufs_signal2_type, 0666, },
1519 { "mss", &spufs_mss_fops, 0666, },
1520 { "mfc", &spufs_mfc_fops, 0666, },
1521 { "cntl", &spufs_cntl_fops, 0666, }, 1851 { "cntl", &spufs_cntl_fops, 0666, },
1522 { "npc", &spufs_npc_ops, 0666, },
1523 { "fpcr", &spufs_fpcr_fops, 0666, }, 1852 { "fpcr", &spufs_fpcr_fops, 0666, },
1853 { "lslr", &spufs_lslr_ops, 0444, },
1854 { "mfc", &spufs_mfc_fops, 0666, },
1855 { "mss", &spufs_mss_fops, 0666, },
1856 { "npc", &spufs_npc_ops, 0666, },
1857 { "srr0", &spufs_srr0_ops, 0666, },
1524 { "decr", &spufs_decr_ops, 0666, }, 1858 { "decr", &spufs_decr_ops, 0666, },
1525 { "decr_status", &spufs_decr_status_ops, 0666, }, 1859 { "decr_status", &spufs_decr_status_ops, 0666, },
1526 { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
1527 { "event_mask", &spufs_event_mask_ops, 0666, }, 1860 { "event_mask", &spufs_event_mask_ops, 0666, },
1528 { "srr0", &spufs_srr0_ops, 0666, }, 1861 { "event_status", &spufs_event_status_ops, 0444, },
1862 { "psmap", &spufs_psmap_fops, 0666, },
1863 { "phys-id", &spufs_id_ops, 0666, },
1864 { "object-id", &spufs_object_id_ops, 0666, },
1865 { "mbox_info", &spufs_mbox_info_fops, 0444, },
1866 { "ibox_info", &spufs_ibox_info_fops, 0444, },
1867 { "wbox_info", &spufs_wbox_info_fops, 0444, },
1868 { "dma_info", &spufs_dma_info_fops, 0444, },
1869 { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
1870 {},
1871};
1872
1873struct tree_descr spufs_dir_nosched_contents[] = {
1874 { "mem", &spufs_mem_fops, 0666, },
1875 { "mbox", &spufs_mbox_fops, 0444, },
1876 { "ibox", &spufs_ibox_fops, 0444, },
1877 { "wbox", &spufs_wbox_fops, 0222, },
1878 { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
1879 { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
1880 { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
1881 { "signal1", &spufs_signal1_fops, 0666, },
1882 { "signal2", &spufs_signal2_fops, 0666, },
1883 { "signal1_type", &spufs_signal1_type, 0666, },
1884 { "signal2_type", &spufs_signal2_type, 0666, },
1885 { "mss", &spufs_mss_fops, 0666, },
1886 { "mfc", &spufs_mfc_fops, 0666, },
1887 { "cntl", &spufs_cntl_fops, 0666, },
1888 { "npc", &spufs_npc_ops, 0666, },
1529 { "psmap", &spufs_psmap_fops, 0666, }, 1889 { "psmap", &spufs_psmap_fops, 0666, },
1530 { "phys-id", &spufs_id_ops, 0666, }, 1890 { "phys-id", &spufs_id_ops, 0666, },
1531 { "object-id", &spufs_object_id_ops, 0666, }, 1891 { "object-id", &spufs_object_id_ops, 0666, },
1532 {}, 1892 {},
1533}; 1893};
1894
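spufs_dir_nosched_contents deliberately omits every file whose handler calls spu_acquire_saved(), such as regs, fpcr, decr and the info files, because saving the context would deschedule an SPU that a NOSCHED context is supposed to keep pinned. A userspace sketch of telling the two layouts apart by probing for one of the omitted files:

    /* Sketch: a NOSCHED context directory carries no "decr" file,
     * since files that force a context save are left out of its tree. */
    #include <stdio.h>
    #include <sys/stat.h>

    static int ctx_is_nosched(const char *ctxdir)
    {
        char path[512];
        struct stat sb;

        snprintf(path, sizeof(path), "%s/decr", ctxdir);
        return stat(path, &sb) != 0;    /* missing => nosched */
    }
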
1895struct spufs_coredump_reader spufs_coredump_read[] = {
1896 { "regs", __spufs_regs_read, NULL, 128 * 16 },
1897 { "fpcr", __spufs_fpcr_read, NULL, 16 },
1898 { "lslr", NULL, __spufs_lslr_get, 11 },
1899 { "decr", NULL, __spufs_decr_get, 11 },
1900 { "decr_status", NULL, __spufs_decr_status_get, 11 },
1901 { "mem", __spufs_mem_read, NULL, 256 * 1024, },
1902 { "signal1", __spufs_signal1_read, NULL, 4 },
1903 { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
1904 { "signal2", __spufs_signal2_read, NULL, 4 },
1905 { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
1906 { "event_mask", NULL, __spufs_event_mask_get, 8 },
1907 { "event_status", NULL, __spufs_event_status_get, 8 },
1908 { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
1909 { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
1910 { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
1911 { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
1912 { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
1913 { "object-id", NULL, __spufs_object_id_get, 19 },
1914 { },
1915};
1916int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
1917
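The table is sentinel-terminated, hence the ARRAY_SIZE() - 1 in spufs_coredump_num_notes. Each entry names a note and supplies either a read() callback for binary payloads or a get() callback for single values, with size giving the payload length in bytes. A kernel-side sketch of how a consumer can walk it, assuming only struct spufs_coredump_reader from spufs.h:

    /* Sketch: iterate the reader table; each entry supplies either a
     * read() callback (binary data) or a get() callback (one value). */
    static void for_each_coredump_note(struct spu_context *ctx)
    {
        struct spufs_coredump_reader *r;

        for (r = spufs_coredump_read; r->name; r++) {
            if (r->read)
                pr_debug("note %s: up to %zu bytes via read()\n",
                         r->name, r->size);
            else
                pr_debug("note %s: value 0x%llx\n",
                         r->name, (unsigned long long)r->get(ctx));
        }
    }
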
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index d805ffed892d..ae42e03b8c86 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -135,21 +135,11 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
135 return ret; 135 return ret;
136} 136}
137 137
138static u32 spu_hw_signal1_read(struct spu_context *ctx)
139{
140 return in_be32(&ctx->spu->problem->signal_notify1);
141}
142
143static void spu_hw_signal1_write(struct spu_context *ctx, u32 data) 138static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
144{ 139{
145 out_be32(&ctx->spu->problem->signal_notify1, data); 140 out_be32(&ctx->spu->problem->signal_notify1, data);
146} 141}
147 142
148static u32 spu_hw_signal2_read(struct spu_context *ctx)
149{
150 return in_be32(&ctx->spu->problem->signal_notify2);
151}
152
153static void spu_hw_signal2_write(struct spu_context *ctx, u32 data) 143static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
154{ 144{
155 out_be32(&ctx->spu->problem->signal_notify2, data); 145 out_be32(&ctx->spu->problem->signal_notify2, data);
@@ -217,21 +207,42 @@ static char *spu_hw_get_ls(struct spu_context *ctx)
217 return ctx->spu->local_store; 207 return ctx->spu->local_store;
218} 208}
219 209
220static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val) 210static u32 spu_hw_runcntl_read(struct spu_context *ctx)
221{ 211{
222 eieio(); 212 return in_be32(&ctx->spu->problem->spu_runcntl_RW);
223 out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
224} 213}
225 214
226static void spu_hw_runcntl_stop(struct spu_context *ctx) 215static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
227{ 216{
228 spin_lock_irq(&ctx->spu->register_lock); 217 spin_lock_irq(&ctx->spu->register_lock);
229 out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP); 218 if (val & SPU_RUNCNTL_ISOLATE)
230 while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING) 219 out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
231 cpu_relax(); 220 out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
232 spin_unlock_irq(&ctx->spu->register_lock); 221 spin_unlock_irq(&ctx->spu->register_lock);
233} 222}
234 223
224static void spu_hw_master_start(struct spu_context *ctx)
225{
226 struct spu *spu = ctx->spu;
227 u64 sr1;
228
229 spin_lock_irq(&spu->register_lock);
230 sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
231 spu_mfc_sr1_set(spu, sr1);
232 spin_unlock_irq(&spu->register_lock);
233}
234
235static void spu_hw_master_stop(struct spu_context *ctx)
236{
237 struct spu *spu = ctx->spu;
238 u64 sr1;
239
240 spin_lock_irq(&spu->register_lock);
241 sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
242 spu_mfc_sr1_set(spu, sr1);
243 spin_unlock_irq(&spu->register_lock);
244}
245
235static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode) 246static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
236{ 247{
237 struct spu_problem __iomem *prob = ctx->spu->problem; 248 struct spu_problem __iomem *prob = ctx->spu->problem;
@@ -291,9 +302,7 @@ struct spu_context_ops spu_hw_ops = {
291 .mbox_stat_poll = spu_hw_mbox_stat_poll, 302 .mbox_stat_poll = spu_hw_mbox_stat_poll,
292 .ibox_read = spu_hw_ibox_read, 303 .ibox_read = spu_hw_ibox_read,
293 .wbox_write = spu_hw_wbox_write, 304 .wbox_write = spu_hw_wbox_write,
294 .signal1_read = spu_hw_signal1_read,
295 .signal1_write = spu_hw_signal1_write, 305 .signal1_write = spu_hw_signal1_write,
296 .signal2_read = spu_hw_signal2_read,
297 .signal2_write = spu_hw_signal2_write, 306 .signal2_write = spu_hw_signal2_write,
298 .signal1_type_set = spu_hw_signal1_type_set, 307 .signal1_type_set = spu_hw_signal1_type_set,
299 .signal1_type_get = spu_hw_signal1_type_get, 308 .signal1_type_get = spu_hw_signal1_type_get,
@@ -303,8 +312,10 @@ struct spu_context_ops spu_hw_ops = {
303 .npc_write = spu_hw_npc_write, 312 .npc_write = spu_hw_npc_write,
304 .status_read = spu_hw_status_read, 313 .status_read = spu_hw_status_read,
305 .get_ls = spu_hw_get_ls, 314 .get_ls = spu_hw_get_ls,
315 .runcntl_read = spu_hw_runcntl_read,
306 .runcntl_write = spu_hw_runcntl_write, 316 .runcntl_write = spu_hw_runcntl_write,
307 .runcntl_stop = spu_hw_runcntl_stop, 317 .master_start = spu_hw_master_start,
318 .master_stop = spu_hw_master_stop,
308 .set_mfc_query = spu_hw_set_mfc_query, 319 .set_mfc_query = spu_hw_set_mfc_query,
309 .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus, 320 .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
310 .get_mfc_free_elements = spu_hw_get_mfc_free_elements, 321 .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
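The old runcntl_stop operation is gone: instead of writing SPU_RUNCNTL_STOP and spinning, spufs now parks a context by clearing the master run-control bit in MFC state register one (master_stop) and resumes it by setting the bit again (master_start), which leaves the SPU's own run-control state intact for isolated mode. The two functions above share a read-modify-write shape that could be factored as, say:

    /* Sketch: the common SR1 read-modify-write under register_lock. */
    static void spu_set_master_run_control(struct spu *spu, int enable)
    {
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu);
        if (enable)
            sr1 |= MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        else
            sr1 &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
    }
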
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 427d00a4f6a0..c7d010749a18 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -33,7 +33,7 @@
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/parser.h> 34#include <linux/parser.h>
35 35
36#include <asm/io.h> 36#include <asm/prom.h>
37#include <asm/semaphore.h> 37#include <asm/semaphore.h>
38#include <asm/spu.h> 38#include <asm/spu.h>
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
@@ -41,6 +41,7 @@
41#include "spufs.h" 41#include "spufs.h"
42 42
43static kmem_cache_t *spufs_inode_cache; 43static kmem_cache_t *spufs_inode_cache;
44char *isolated_loader;
44 45
45static struct inode * 46static struct inode *
46spufs_alloc_inode(struct super_block *sb) 47spufs_alloc_inode(struct super_block *sb)
@@ -231,6 +232,7 @@ struct file_operations spufs_context_fops = {
231 .readdir = dcache_readdir, 232 .readdir = dcache_readdir,
232 .fsync = simple_sync_file, 233 .fsync = simple_sync_file,
233}; 234};
235EXPORT_SYMBOL_GPL(spufs_context_fops);
234 236
235static int 237static int
236spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, 238spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
@@ -255,10 +257,14 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
255 goto out_iput; 257 goto out_iput;
256 258
257 ctx->flags = flags; 259 ctx->flags = flags;
258
259 inode->i_op = &spufs_dir_inode_operations; 260 inode->i_op = &spufs_dir_inode_operations;
260 inode->i_fop = &simple_dir_operations; 261 inode->i_fop = &simple_dir_operations;
261 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx); 262 if (flags & SPU_CREATE_NOSCHED)
263 ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
264 mode, ctx);
265 else
266 ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
267
262 if (ret) 268 if (ret)
263 goto out_free_ctx; 269 goto out_free_ctx;
264 270
@@ -307,6 +313,20 @@ static int spufs_create_context(struct inode *inode,
307{ 313{
308 int ret; 314 int ret;
309 315
316 ret = -EPERM;
317 if ((flags & SPU_CREATE_NOSCHED) &&
318 !capable(CAP_SYS_NICE))
319 goto out_unlock;
320
321 ret = -EINVAL;
322 if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
323 == SPU_CREATE_ISOLATE)
324 goto out_unlock;
325
326 ret = -ENODEV;
327 if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
328 goto out_unlock;
329
310 ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO); 330 ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
311 if (ret) 331 if (ret)
312 goto out_unlock; 332 goto out_unlock;
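These checks give the new flags their semantics: SPU_CREATE_NOSCHED requires CAP_SYS_NICE because the context monopolizes a physical SPU, SPU_CREATE_ISOLATE is only valid together with NOSCHED, and isolation is refused with ENODEV when no loader was found in the device tree. A minimal userspace sketch of requesting such a context; the flag values are assumptions modeled on asm/spu.h, not verified against this tree:

    /* Sketch: create an isolated (and therefore non-scheduled) context.
     * Flag values below are assumed, check asm/spu.h for the real ones. */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define SPU_CREATE_NOSCHED 0x0004   /* assumed value */
    #define SPU_CREATE_ISOLATE 0x0008   /* assumed value */

    int main(void)
    {
        long fd = syscall(__NR_spu_create, "/spu/isolated-ctx",
                          SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE, 0700);

        if (fd < 0)
            perror("spu_create");   /* EPERM, EINVAL or ENODEV per above */
        return fd < 0;
    }
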
@@ -540,6 +560,30 @@ spufs_parse_options(char *options, struct inode *root)
540 return 1; 560 return 1;
541} 561}
542 562
563static void
564spufs_init_isolated_loader(void)
565{
566 struct device_node *dn;
567 const char *loader;
568 int size;
569
570 dn = of_find_node_by_path("/spu-isolation");
571 if (!dn)
572 return;
573
574 loader = get_property(dn, "loader", &size);
575 if (!loader)
576 return;
577
 578 /* kmalloc should align on a 16 byte boundary */
579 isolated_loader = kmalloc(size, GFP_KERNEL);
580 if (!isolated_loader)
581 return;
582
583 memcpy(isolated_loader, loader, size);
584 printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
585}
586
543static int 587static int
544spufs_create_root(struct super_block *sb, void *data) 588spufs_create_root(struct super_block *sb, void *data)
545{ 589{
@@ -608,6 +652,7 @@ static struct file_system_type spufs_type = {
608static int __init spufs_init(void) 652static int __init spufs_init(void)
609{ 653{
610 int ret; 654 int ret;
655
611 ret = -ENOMEM; 656 ret = -ENOMEM;
612 spufs_inode_cache = kmem_cache_create("spufs_inode_cache", 657 spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
613 sizeof(struct spufs_inode_info), 0, 658 sizeof(struct spufs_inode_info), 0,
@@ -625,6 +670,12 @@ static int __init spufs_init(void)
625 ret = register_spu_syscalls(&spufs_calls); 670 ret = register_spu_syscalls(&spufs_calls);
626 if (ret) 671 if (ret)
627 goto out_fs; 672 goto out_fs;
673 ret = register_arch_coredump_calls(&spufs_coredump_calls);
674 if (ret)
675 goto out_fs;
676
677 spufs_init_isolated_loader();
678
628 return 0; 679 return 0;
629out_fs: 680out_fs:
630 unregister_filesystem(&spufs_type); 681 unregister_filesystem(&spufs_type);
@@ -638,6 +689,7 @@ module_init(spufs_init);
638static void __exit spufs_exit(void) 689static void __exit spufs_exit(void)
639{ 690{
640 spu_sched_exit(); 691 spu_sched_exit();
692 unregister_arch_coredump_calls(&spufs_coredump_calls);
641 unregister_spu_syscalls(&spufs_calls); 693 unregister_spu_syscalls(&spufs_calls);
642 unregister_filesystem(&spufs_type); 694 unregister_filesystem(&spufs_type);
643 kmem_cache_destroy(spufs_inode_cache); 695 kmem_cache_destroy(spufs_inode_cache);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 63df8cf4ba16..1acc2ffef8c8 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -1,7 +1,11 @@
1#define DEBUG
2
1#include <linux/wait.h> 3#include <linux/wait.h>
2#include <linux/ptrace.h> 4#include <linux/ptrace.h>
3 5
4#include <asm/spu.h> 6#include <asm/spu.h>
7#include <asm/spu_priv1.h>
8#include <asm/io.h>
5#include <asm/unistd.h> 9#include <asm/unistd.h>
6 10
7#include "spufs.h" 11#include "spufs.h"
@@ -24,6 +28,7 @@ void spufs_dma_callback(struct spu *spu, int type)
24 } else { 28 } else {
25 switch (type) { 29 switch (type) {
26 case SPE_EVENT_DMA_ALIGNMENT: 30 case SPE_EVENT_DMA_ALIGNMENT:
31 case SPE_EVENT_SPE_DATA_STORAGE:
27 case SPE_EVENT_INVALID_DMA: 32 case SPE_EVENT_INVALID_DMA:
28 force_sig(SIGBUS, /* info, */ current); 33 force_sig(SIGBUS, /* info, */ current);
29 break; 34 break;
@@ -48,15 +53,122 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
48 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0; 53 return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
49} 54}
50 55
56static int spu_setup_isolated(struct spu_context *ctx)
57{
58 int ret;
59 u64 __iomem *mfc_cntl;
60 u64 sr1;
61 u32 status;
62 unsigned long timeout;
63 const u32 status_loading = SPU_STATUS_RUNNING
64 | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
65
66 if (!isolated_loader)
67 return -ENODEV;
68
69 ret = spu_acquire_exclusive(ctx);
70 if (ret)
71 goto out;
72
73 mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
74
75 /* purge the MFC DMA queue to ensure no spurious accesses before we
76 * enter kernel mode */
77 timeout = jiffies + HZ;
78 out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
79 while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
80 != MFC_CNTL_PURGE_DMA_COMPLETE) {
81 if (time_after(jiffies, timeout)) {
82 printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
83 __FUNCTION__);
84 ret = -EIO;
85 goto out_unlock;
86 }
87 cond_resched();
88 }
89
90 /* put the SPE in kernel mode to allow access to the loader */
91 sr1 = spu_mfc_sr1_get(ctx->spu);
92 sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
93 spu_mfc_sr1_set(ctx->spu, sr1);
94
95 /* start the loader */
96 ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
97 ctx->ops->signal2_write(ctx,
98 (unsigned long)isolated_loader & 0xffffffff);
99
100 ctx->ops->runcntl_write(ctx,
101 SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
102
103 ret = 0;
104 timeout = jiffies + HZ;
105 while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
106 status_loading) {
107 if (time_after(jiffies, timeout)) {
108 printk(KERN_ERR "%s: timeout waiting for loader\n",
109 __FUNCTION__);
110 ret = -EIO;
111 goto out_drop_priv;
112 }
113 cond_resched();
114 }
115
116 if (!(status & SPU_STATUS_RUNNING)) {
117 /* If isolated LOAD has failed: run SPU, we will get a stop-and
118 * signal later. */
119 pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
120 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
121 ret = -EACCES;
122
123 } else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
124 /* This isn't allowed by the CBEA, but check anyway */
125 pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
126 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
127 ret = -EINVAL;
128 }
129
130out_drop_priv:
131 /* Finished accessing the loader. Drop kernel mode */
132 sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
133 spu_mfc_sr1_set(ctx->spu, sr1);
134
135out_unlock:
136 spu_release_exclusive(ctx);
137out:
138 return ret;
139}
140
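Worth noting in spu_setup_isolated: the loader's kernel address reaches the SPE through the two 32-bit signal-notification registers, high word via signal1, low word via signal2, and this only works because the preceding SR1 write dropped problem-state mode so the loader can DMA from a kernel address. The split, as a sketch with a hypothetical helper name:

    /* Sketch: a 64-bit effective address split across the two 32-bit
     * signal registers, as spu_setup_isolated does above. */
    static void pass_addr_via_signals(struct spu_context *ctx, const void *ea)
    {
        unsigned long addr = (unsigned long)ea;

        ctx->ops->signal1_write(ctx, addr >> 32);           /* high word */
        ctx->ops->signal2_write(ctx, addr & 0xffffffff);    /* low word  */
    }
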
51static inline int spu_run_init(struct spu_context *ctx, u32 * npc) 141static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
52{ 142{
53 int ret; 143 int ret;
144 unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
54 145
55 if ((ret = spu_acquire_runnable(ctx)) != 0) 146 ret = spu_acquire_runnable(ctx);
147 if (ret)
56 return ret; 148 return ret;
57 ctx->ops->npc_write(ctx, *npc); 149
58 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); 150 if (ctx->flags & SPU_CREATE_ISOLATE) {
59 return 0; 151 if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
152 /* Need to release ctx, because spu_setup_isolated will
153 * acquire it exclusively.
154 */
155 spu_release(ctx);
156 ret = spu_setup_isolated(ctx);
157 if (!ret)
158 ret = spu_acquire_runnable(ctx);
159 }
160
161 /* if userspace has set the runcntrl register (eg, to issue an
162 * isolated exit), we need to re-set it here */
163 runcntl = ctx->ops->runcntl_read(ctx) &
164 (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
165 if (runcntl == 0)
166 runcntl = SPU_RUNCNTL_RUNNABLE;
167 } else
168 ctx->ops->npc_write(ctx, *npc);
169
170 ctx->ops->runcntl_write(ctx, runcntl);
171 return ret;
60} 172}
61 173
62static inline int spu_run_fini(struct spu_context *ctx, u32 * npc, 174static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
@@ -70,13 +182,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
70 182
71 if (signal_pending(current)) 183 if (signal_pending(current))
72 ret = -ERESTARTSYS; 184 ret = -ERESTARTSYS;
73 if (unlikely(current->ptrace & PT_PTRACED)) { 185
74 if ((*status & SPU_STATUS_STOPPED_BY_STOP)
75 && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
76 force_sig(SIGTRAP, current);
77 ret = -ERESTARTSYS;
78 }
79 }
80 return ret; 186 return ret;
81} 187}
82 188
@@ -204,6 +310,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
204 if (down_interruptible(&ctx->run_sema)) 310 if (down_interruptible(&ctx->run_sema))
205 return -ERESTARTSYS; 311 return -ERESTARTSYS;
206 312
313 ctx->ops->master_start(ctx);
207 ctx->event_return = 0; 314 ctx->event_return = 0;
208 ret = spu_run_init(ctx, npc); 315 ret = spu_run_init(ctx, npc);
209 if (ret) 316 if (ret)
@@ -223,7 +330,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
223 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { 330 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
224 ret = spu_reacquire_runnable(ctx, npc, &status); 331 ret = spu_reacquire_runnable(ctx, npc, &status);
225 if (ret) 332 if (ret)
226 goto out; 333 goto out2;
227 continue; 334 continue;
228 } 335 }
229 ret = spu_process_events(ctx); 336 ret = spu_process_events(ctx);
@@ -231,12 +338,24 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
231 } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP | 338 } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
232 SPU_STATUS_STOPPED_BY_HALT))); 339 SPU_STATUS_STOPPED_BY_HALT)));
233 340
234 ctx->ops->runcntl_stop(ctx); 341 ctx->ops->master_stop(ctx);
235 ret = spu_run_fini(ctx, npc, &status); 342 ret = spu_run_fini(ctx, npc, &status);
236 if (!ret)
237 ret = status;
238 spu_yield(ctx); 343 spu_yield(ctx);
239 344
345out2:
346 if ((ret == 0) ||
347 ((ret == -ERESTARTSYS) &&
348 ((status & SPU_STATUS_STOPPED_BY_HALT) ||
349 ((status & SPU_STATUS_STOPPED_BY_STOP) &&
350 (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
351 ret = status;
352
353 if ((status & SPU_STATUS_STOPPED_BY_STOP)
354 && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
355 force_sig(SIGTRAP, current);
356 ret = -ERESTARTSYS;
357 }
358
240out: 359out:
241 *event = ctx->event_return; 360 *event = ctx->event_return;
242 up(&ctx->run_sema); 361 up(&ctx->run_sema);
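The status handling that used to live in spu_run_fini now sits at the end of spufs_run_spu: the raw status word becomes the return value except for a restartable stop (code 0x2104 while -ERESTARTSYS is pending), and the debugger stop code 0x3fff is turned into a SIGTRAP. A userspace sketch of picking such a status word apart; the bit values and shift are assumptions from asm/spu.h:

    /* Sketch: decode the status word spu_run hands back. The constants
     * are assumed values, see asm/spu.h for the authoritative ones. */
    #include <stdio.h>

    #define SPU_STATUS_STOPPED_BY_STOP 0x02     /* assumed value */
    #define SPU_STATUS_STOPPED_BY_HALT 0x04     /* assumed value */
    #define SPU_STOP_STATUS_SHIFT      16       /* assumed value */

    static void decode_spu_status(unsigned int status)
    {
        if (status & SPU_STATUS_STOPPED_BY_STOP)
            printf("stop-and-signal, code 0x%x\n",
                   status >> SPU_STOP_STATUS_SHIFT);
        else if (status & SPU_STATUS_STOPPED_BY_HALT)
            printf("halted\n");
    }
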
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index a0f55ca2d488..70fb13395c04 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -29,6 +29,7 @@
29 29
30#include <asm/spu.h> 30#include <asm/spu.h>
31#include <asm/spu_csa.h> 31#include <asm/spu_csa.h>
32#include <asm/spu_info.h>
32 33
33/* The magic number for our file system */ 34/* The magic number for our file system */
34enum { 35enum {
@@ -114,13 +115,19 @@ struct spu_context_ops {
114 void (*npc_write) (struct spu_context * ctx, u32 data); 115 void (*npc_write) (struct spu_context * ctx, u32 data);
115 u32(*status_read) (struct spu_context * ctx); 116 u32(*status_read) (struct spu_context * ctx);
116 char*(*get_ls) (struct spu_context * ctx); 117 char*(*get_ls) (struct spu_context * ctx);
118 u32 (*runcntl_read) (struct spu_context * ctx);
117 void (*runcntl_write) (struct spu_context * ctx, u32 data); 119 void (*runcntl_write) (struct spu_context * ctx, u32 data);
118 void (*runcntl_stop) (struct spu_context * ctx); 120 void (*master_start) (struct spu_context * ctx);
121 void (*master_stop) (struct spu_context * ctx);
119 int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode); 122 int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
120 u32 (*read_mfc_tagstatus)(struct spu_context * ctx); 123 u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
121 u32 (*get_mfc_free_elements)(struct spu_context *ctx); 124 u32 (*get_mfc_free_elements)(struct spu_context *ctx);
122 int (*send_mfc_command)(struct spu_context *ctx, 125 int (*send_mfc_command)(struct spu_context * ctx,
123 struct mfc_dma_command *cmd); 126 struct mfc_dma_command * cmd);
127 void (*dma_info_read) (struct spu_context * ctx,
128 struct spu_dma_info * info);
129 void (*proxydma_info_read) (struct spu_context * ctx,
130 struct spu_proxydma_info * info);
124}; 131};
125 132
126extern struct spu_context_ops spu_hw_ops; 133extern struct spu_context_ops spu_hw_ops;
@@ -135,6 +142,7 @@ struct spufs_inode_info {
135 container_of(inode, struct spufs_inode_info, vfs_inode) 142 container_of(inode, struct spufs_inode_info, vfs_inode)
136 143
137extern struct tree_descr spufs_dir_contents[]; 144extern struct tree_descr spufs_dir_contents[];
145extern struct tree_descr spufs_dir_nosched_contents[];
138 146
139/* system call implementation */ 147/* system call implementation */
140long spufs_run_spu(struct file *file, 148long spufs_run_spu(struct file *file,
@@ -162,6 +170,12 @@ void spu_acquire(struct spu_context *ctx);
162void spu_release(struct spu_context *ctx); 170void spu_release(struct spu_context *ctx);
163int spu_acquire_runnable(struct spu_context *ctx); 171int spu_acquire_runnable(struct spu_context *ctx);
164void spu_acquire_saved(struct spu_context *ctx); 172void spu_acquire_saved(struct spu_context *ctx);
173int spu_acquire_exclusive(struct spu_context *ctx);
174
175static inline void spu_release_exclusive(struct spu_context *ctx)
176{
177 up_write(&ctx->state_sema);
178}
165 179
166int spu_activate(struct spu_context *ctx, u64 flags); 180int spu_activate(struct spu_context *ctx, u64 flags);
167void spu_deactivate(struct spu_context *ctx); 181void spu_deactivate(struct spu_context *ctx);
@@ -169,6 +183,8 @@ void spu_yield(struct spu_context *ctx);
169int __init spu_sched_init(void); 183int __init spu_sched_init(void);
170void __exit spu_sched_exit(void); 184void __exit spu_sched_exit(void);
171 185
186extern char *isolated_loader;
187
172/* 188/*
173 * spufs_wait 189 * spufs_wait
174 * Same as wait_event_interruptible(), except that here 190 * Same as wait_event_interruptible(), except that here
@@ -207,4 +223,15 @@ void spufs_stop_callback(struct spu *spu);
207void spufs_mfc_callback(struct spu *spu); 223void spufs_mfc_callback(struct spu *spu);
208void spufs_dma_callback(struct spu *spu, int type); 224void spufs_dma_callback(struct spu *spu, int type);
209 225
226extern struct spu_coredump_calls spufs_coredump_calls;
227struct spufs_coredump_reader {
228 char *name;
229 ssize_t (*read)(struct spu_context *ctx,
230 char __user *buffer, size_t size, loff_t *pos);
231 u64 (*get)(void *data);
232 size_t size;
233};
234extern struct spufs_coredump_reader spufs_coredump_read[];
235extern int spufs_coredump_num_notes;
236
210#endif 237#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 0f782ca662ba..c08981ff7fc6 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -102,7 +102,7 @@ static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
102 * saved at this time. 102 * saved at this time.
103 */ 103 */
104 isolate_state = SPU_STATUS_ISOLATED_STATE | 104 isolate_state = SPU_STATUS_ISOLATED_STATE |
105 SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS; 105 SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
106 return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0; 106 return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
107} 107}
108 108
@@ -1046,12 +1046,12 @@ static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
1046 */ 1046 */
1047 if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) { 1047 if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
1048 if (in_be32(&prob->spu_status_R) & 1048 if (in_be32(&prob->spu_status_R) &
1049 SPU_STATUS_ISOLATED_EXIT_STAUTUS) { 1049 SPU_STATUS_ISOLATED_EXIT_STATUS) {
1050 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & 1050 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
1051 SPU_STATUS_RUNNING); 1051 SPU_STATUS_RUNNING);
1052 } 1052 }
1053 if ((in_be32(&prob->spu_status_R) & 1053 if ((in_be32(&prob->spu_status_R) &
1054 SPU_STATUS_ISOLATED_LOAD_STAUTUS) 1054 SPU_STATUS_ISOLATED_LOAD_STATUS)
1055 || (in_be32(&prob->spu_status_R) & 1055 || (in_be32(&prob->spu_status_R) &
1056 SPU_STATUS_ISOLATED_STATE)) { 1056 SPU_STATUS_ISOLATED_STATE)) {
1057 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); 1057 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
@@ -1085,7 +1085,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1085 */ 1085 */
1086 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { 1086 if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
1087 if (in_be32(&prob->spu_status_R) & 1087 if (in_be32(&prob->spu_status_R) &
1088 SPU_STATUS_ISOLATED_EXIT_STAUTUS) { 1088 SPU_STATUS_ISOLATED_EXIT_STATUS) {
1089 spu_mfc_sr1_set(spu, 1089 spu_mfc_sr1_set(spu,
1090 MFC_STATE1_MASTER_RUN_CONTROL_MASK); 1090 MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1091 eieio(); 1091 eieio();
@@ -1095,7 +1095,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
1095 SPU_STATUS_RUNNING); 1095 SPU_STATUS_RUNNING);
1096 } 1096 }
1097 if ((in_be32(&prob->spu_status_R) & 1097 if ((in_be32(&prob->spu_status_R) &
1098 SPU_STATUS_ISOLATED_LOAD_STAUTUS) 1098 SPU_STATUS_ISOLATED_LOAD_STATUS)
1099 || (in_be32(&prob->spu_status_R) & 1099 || (in_be32(&prob->spu_status_R) &
1100 SPU_STATUS_ISOLATED_STATE)) { 1100 SPU_STATUS_ISOLATED_STATE)) {
1101 spu_mfc_sr1_set(spu, 1101 spu_mfc_sr1_set(spu,
@@ -1916,6 +1916,51 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
1916 wait_spu_stopped(prev, spu); /* Step 57. */ 1916 wait_spu_stopped(prev, spu); /* Step 57. */
1917} 1917}
1918 1918
1919static void force_spu_isolate_exit(struct spu *spu)
1920{
1921 struct spu_problem __iomem *prob = spu->problem;
1922 struct spu_priv2 __iomem *priv2 = spu->priv2;
1923
1924 /* Stop SPE execution and wait for completion. */
1925 out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
1926 iobarrier_rw();
1927 POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
1928
1929 /* Restart SPE master runcntl. */
1930 spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
1931 iobarrier_w();
1932
1933 /* Initiate isolate exit request and wait for completion. */
1934 out_be64(&priv2->spu_privcntl_RW, 4LL);
1935 iobarrier_w();
1936 out_be32(&prob->spu_runcntl_RW, 2);
1937 iobarrier_rw();
1938 POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
1939 & SPU_STATUS_STOPPED_BY_STOP));
1940
1941 /* Reset load request to normal. */
1942 out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
1943 iobarrier_w();
1944}
1945
1946/**
1947 * stop_spu_isolate
1948 * Check SPU run-control state and force isolated
1949 * exit function as necessary.
1950 */
1951static void stop_spu_isolate(struct spu *spu)
1952{
1953 struct spu_problem __iomem *prob = spu->problem;
1954
1955 if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
1956 /* The SPU is in isolated state; the only way
1957 * to get it out is to perform an isolated
1958 * exit (clean) operation.
1959 */
1960 force_spu_isolate_exit(spu);
1961 }
1962}
1963
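force_spu_isolate_exit relies on the POLL_WHILE_TRUE/POLL_WHILE_FALSE busy-wait macros defined near the top of switch.c, which spin without a timeout; that is tolerable here because the CBEA specifies that the isolate-exit request completes. The bare constants are register encodings: the 2 written to spu_runcntl_RW is the runnable request, and the 4LL in spu_privcntl_RW appears to select the isolation load request, mirroring spu_hw_runcntl_write() earlier in this patch. Roughly, the macros amount to:

    /* Paraphrase of the switch.c helpers: unbounded polling loops. */
    #define POLL_WHILE_TRUE(_c)  do { } while (_c)
    #define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
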
1919static void harvest(struct spu_state *prev, struct spu *spu) 1964static void harvest(struct spu_state *prev, struct spu *spu)
1920{ 1965{
1921 /* 1966 /*
@@ -1928,6 +1973,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
1928 inhibit_user_access(prev, spu); /* Step 3. */ 1973 inhibit_user_access(prev, spu); /* Step 3. */
1929 terminate_spu_app(prev, spu); /* Step 4. */ 1974 terminate_spu_app(prev, spu); /* Step 4. */
1930 set_switch_pending(prev, spu); /* Step 5. */ 1975 set_switch_pending(prev, spu); /* Step 5. */
1976 stop_spu_isolate(spu); /* NEW. */
1931 remove_other_spu_access(prev, spu); /* Step 6. */ 1977 remove_other_spu_access(prev, spu); /* Step 6. */
1932 suspend_mfc(prev, spu); /* Step 7. */ 1978 suspend_mfc(prev, spu); /* Step 7. */
1933 wait_suspend_mfc_complete(prev, spu); /* Step 8. */ 1979 wait_suspend_mfc_complete(prev, spu); /* Step 8. */
@@ -2096,11 +2142,11 @@ int spu_save(struct spu_state *prev, struct spu *spu)
2096 acquire_spu_lock(spu); /* Step 1. */ 2142 acquire_spu_lock(spu); /* Step 1. */
2097 rc = __do_spu_save(prev, spu); /* Steps 2-53. */ 2143 rc = __do_spu_save(prev, spu); /* Steps 2-53. */
2098 release_spu_lock(spu); 2144 release_spu_lock(spu);
2099 if (rc) { 2145 if (rc != 0 && rc != 2 && rc != 6) {
2100 panic("%s failed on SPU[%d], rc=%d.\n", 2146 panic("%s failed on SPU[%d], rc=%d.\n",
2101 __func__, spu->number, rc); 2147 __func__, spu->number, rc);
2102 } 2148 }
2103 return rc; 2149 return 0;
2104} 2150}
2105EXPORT_SYMBOL_GPL(spu_save); 2151EXPORT_SYMBOL_GPL(spu_save);
2106 2152
@@ -2165,9 +2211,6 @@ static void init_priv1(struct spu_state *csa)
2165 MFC_STATE1_PROBLEM_STATE_MASK | 2211 MFC_STATE1_PROBLEM_STATE_MASK |
2166 MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK; 2212 MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
2167 2213
2168 /* Set storage description. */
2169 csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);
2170
2171 /* Enable OS-specific set of interrupts. */ 2214 /* Enable OS-specific set of interrupts. */
2172 csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | 2215 csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
2173 CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | 2216 CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |