path: root/arch/powerpc/platforms/cell/pervasive.c
author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2006-06-19 14:33:16 -0400
committer	Paul Mackerras <paulus@samba.org>	2006-06-21 01:01:29 -0400
commit	acf7d76827a577059636e949079021e6af6dd702 (patch)
tree	283e94488c79e75dd3df9a376e1e8a27a69e26ec /arch/powerpc/platforms/cell/pervasive.c
parent	ef82a306b46dbedaecbb154b24d05dfab937df35 (diff)
[POWERPC] cell: add RAS support
This is a first version of support for the Cell BE "Reliability, Availability and Serviceability" features.

It doesn't yet handle some of the RAS interrupts (the ones described in iic_is/iic_irr); I'm still working on a proper way to expose these. They are essentially a cascaded controller by themselves (sic !), though I may just handle them locally in the iic driver. I also need to sync with David Erb on the way he hooked in the performance monitor interrupt.

So that's all for 2.6.17, and I'll do more work on that with my rework of the powerpc interrupt layer that I'm hacking on at the moment.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
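For context, here is a rough sketch (not part of the commit) of the SRR1 wake-reason dispatch this patch introduces in cbe_system_reset_exception(). The SRR1_WAKEMT, SRR1_WAKESYSERR and SRR1_WAKETHERM cases, the CONFIG_CBE_RAS guard, and the cbe_system_error_exception()/cbe_thermal_exception() handlers come from the hunks below; SRR1_WAKEMASK and SRR1_WAKEDEC are assumed to be the usual definitions from <asm/reg.h> and are not shown in this diff, and the helper name is hypothetical.

/*
 * Illustrative sketch only -- mirrors the wake-reason dispatch that this
 * patch adds to cbe_system_reset_exception(); not additional commit code.
 */
static int cbe_decode_wakeup(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {	/* assumed mask from <asm/reg.h> */
	case SRR1_WAKEDEC:
		/* Decrementer wakeup: run the normal timer interrupt code. */
		timer_interrupt(regs);
		break;
	case SRR1_WAKEMT:
		/* Thread-switch wakeup: nothing to do. */
		break;
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		/* System error wakeup: route to the new RAS handler. */
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		/* Thermal event wakeup: route to the new RAS handler. */
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* Unknown wake reason: let the caller perform a real reset. */
		return 0;
	}
	return 1;
}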
Diffstat (limited to 'arch/powerpc/platforms/cell/pervasive.c')
-rw-r--r--	arch/powerpc/platforms/cell/pervasive.c	104
1 file changed, 21 insertions(+), 83 deletions(-)
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 7eed8c624517..695ac4e1617e 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -37,36 +37,28 @@
 #include <asm/reg.h>
 
 #include "pervasive.h"
+#include "cbe_regs.h"
 
 static DEFINE_SPINLOCK(cbe_pervasive_lock);
-struct cbe_pervasive {
-	struct pmd_regs __iomem *regs;
-	unsigned int thread;
-};
-
-/* can't use per_cpu from setup_arch */
-static struct cbe_pervasive cbe_pervasive[NR_CPUS];
 
 static void __init cbe_enable_pause_zero(void)
 {
 	unsigned long thread_switch_control;
 	unsigned long temp_register;
-	struct cbe_pervasive *p;
-	int thread;
+	struct cbe_pmd_regs __iomem *pregs;
 
 	spin_lock_irq(&cbe_pervasive_lock);
-	p = &cbe_pervasive[smp_processor_id()];
-
-	if (!cbe_pervasive->regs)
+	pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
+	if (pregs == NULL)
 		goto out;
 
 	pr_debug("Power Management: CPU %d\n", smp_processor_id());
 
 	/* Enable Pause(0) control bit */
-	temp_register = in_be64(&p->regs->pm_control);
+	temp_register = in_be64(&pregs->pm_control);
 
-	out_be64(&p->regs->pm_control,
-		 temp_register|PMD_PAUSE_ZERO_CONTROL);
+	out_be64(&pregs->pm_control,
+		 temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
 
 	/* Enable DEC and EE interrupt request */
 	thread_switch_control = mfspr(SPRN_TSC_CELL);
@@ -75,25 +67,16 @@ static void __init cbe_enable_pause_zero(void)
 	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
 	case CTRL_CT0:
 		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
-		thread = 0;
 		break;
 	case CTRL_CT1:
 		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
-		thread = 1;
 		break;
 	default:
 		printk(KERN_WARNING "%s: unknown configuration\n",
 			__FUNCTION__);
-		thread = -1;
 		break;
 	}
 
-	if (p->thread != thread)
-		printk(KERN_WARNING "%s: device tree inconsistant, "
-		       "cpu %i: %d/%d\n", __FUNCTION__,
-		       smp_processor_id(),
-		       p->thread, thread);
-
 	mtspr(SPRN_TSC_CELL, thread_switch_control);
 
 out:
@@ -104,6 +87,11 @@ static void cbe_idle(void)
 {
 	unsigned long ctrl;
 
+	/* Why do we do that on every idle ? Couldn't that be done once for
+	 * all or do we lose the state some way ? Also, the pm_control
+	 * register setting, that can't be set once at boot ? We really want
+	 * to move that away in order to implement a simple powersave
+	 */
 	cbe_enable_pause_zero();
 
 	while (1) {
@@ -152,8 +140,15 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
 		timer_interrupt(regs);
 		break;
 	case SRR1_WAKEMT:
-		/* no action required */
 		break;
+#ifdef CONFIG_CBE_RAS
+	case SRR1_WAKESYSERR:
+		cbe_system_error_exception(regs);
+		break;
+	case SRR1_WAKETHERM:
+		cbe_thermal_exception(regs);
+		break;
+#endif /* CONFIG_CBE_RAS */
 	default:
 		/* do system reset */
 		return 0;
@@ -162,68 +157,11 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
 	return 1;
 }
 
-static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
-{
-	struct device_node *node;
-	unsigned int *int_servers;
-	char *addr;
-	unsigned long real_address;
-	unsigned int size;
-
-	struct pmd_regs __iomem *pmd_mmio_area;
-	int hardid, thread;
-	int proplen;
-
-	pmd_mmio_area = NULL;
-	hardid = get_hard_smp_processor_id(cpu);
-	for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
-		int_servers = (void *) get_property(node,
-				"ibm,ppc-interrupt-server#s", &proplen);
-		if (!int_servers) {
-			printk(KERN_WARNING "%s misses "
-			"ibm,ppc-interrupt-server#s property",
-				node->full_name);
-			continue;
-		}
-		for (thread = 0; thread < proplen / sizeof (int); thread++) {
-			if (hardid == int_servers[thread]) {
-				addr = get_property(node, "pervasive", NULL);
-				goto found;
-			}
-		}
-	}
-
-	printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
-	return -EINVAL;
-
-found:
-	real_address = *(unsigned long*) addr;
-	addr += sizeof (unsigned long);
-	size = *(unsigned int*) addr;
-
-	pr_debug("pervasive area for CPU %d at %lx, size %x\n",
-			cpu, real_address, size);
-	p->regs = ioremap(real_address, size);
-	p->thread = thread;
-	return 0;
-}
-
-void __init cell_pervasive_init(void)
+void __init cbe_pervasive_init(void)
 {
-	struct cbe_pervasive *p;
-	int cpu;
-	int ret;
-
 	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
 		return;
 
-	for_each_possible_cpu(cpu) {
-		p = &cbe_pervasive[cpu];
-		ret = cbe_find_pmd_mmio(cpu, p);
-		if (ret)
-			return;
-	}
-
 	ppc_md.idle_loop = cbe_idle;
 	ppc_md.system_reset_exception = cbe_system_reset_exception;
 }