author     Anton Altaparmakov <aia21@cantab.net>  2005-10-31 05:06:46 -0500
committer  Anton Altaparmakov <aia21@cantab.net>  2005-10-31 05:06:46 -0500
commit     1f04c0a24b2f3cfe89c802a24396263623e3512d (patch)
tree       d7e2216b6e65b833c0c2b79b478d13ce17dbf296 /arch/ppc64/kernel/ras.c
parent     07b188ab773e183871e57b33ae37bf635c9f12ba (diff)
parent     e2f2e58e7968f8446b1078a20a18bf8ea12b4fbc (diff)
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'arch/ppc64/kernel/ras.c')
-rw-r--r--	arch/ppc64/kernel/ras.c	353
1 file changed, 0 insertions, 353 deletions
diff --git a/arch/ppc64/kernel/ras.c b/arch/ppc64/kernel/ras.c
deleted file mode 100644
index 41b97dc9cc0a..000000000000
--- a/arch/ppc64/kernel/ras.c
+++ /dev/null
@@ -1,353 +0,0 @@
/*
 * ras.c
 * Copyright (C) 2001 Dave Engebretsen IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Change Activity:
 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
 * End Change Activity
 */

#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sysrq.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/ppcdebug.h>

static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);

char mce_data_buf[RTAS_ERROR_LOG_MAX];
/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;

static int ras_get_sensor_state_token;
static int ras_check_exception_token;

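/*
 * EPOW_SENSOR_TOKEN/EPOW_SENSOR_INDEX identify the environmental and
 * power warning (EPOW) sensor queried through the RTAS get-sensor-state
 * call below, and RAS_VECTOR_OFFSET is the 0x500 external interrupt
 * vector passed to check-exception when reporting these events.
 */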
#define EPOW_SENSOR_TOKEN	9
#define EPOW_SENSOR_INDEX	0
#define RAS_VECTOR_OFFSET	0x500

static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
				      struct pt_regs * regs);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
				       struct pt_regs * regs);

/* #define DEBUG */

static void request_ras_irqs(struct device_node *np, char *propname,
			irqreturn_t (*handler)(int, void *, struct pt_regs *),
			const char *name)
{
	unsigned int *ireg, len, i;
	int virq, n_intr;

	ireg = (unsigned int *)get_property(np, propname, &len);
	if (ireg == NULL)
		return;
	n_intr = prom_n_intr_cells(np);
	len /= n_intr * sizeof(*ireg);

	for (i = 0; i < len; i++) {
		virq = virt_irq_create_mapping(*ireg);
		if (virq == NO_IRQ) {
			printk(KERN_ERR "Unable to allocate interrupt "
			       "number for %s\n", np->full_name);
			return;
		}
		if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) {
			printk(KERN_ERR "Unable to request interrupt %d for "
			       "%s\n", irq_offset_up(virq), np->full_name);
			return;
		}
		ireg += n_intr;
	}
}

/*
 * Initialize handlers for the set of interrupts caused by hardware errors
 * and power system events.
 */
static int __init init_ras_IRQ(void)
{
	struct device_node *np;

	ras_get_sensor_state_token = rtas_token("get-sensor-state");
	ras_check_exception_token = rtas_token("check-exception");

	/* Internal Errors */
	np = of_find_node_by_path("/event-sources/internal-errors");
	if (np != NULL) {
		request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt,
				 "RAS_ERROR");
		request_ras_irqs(np, "interrupts", ras_error_interrupt,
				 "RAS_ERROR");
		of_node_put(np);
	}

	/* EPOW Events */
	np = of_find_node_by_path("/event-sources/epow-events");
	if (np != NULL) {
		request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt,
				 "RAS_EPOW");
		request_ras_irqs(np, "interrupts", ras_epow_interrupt,
				 "RAS_EPOW");
		of_node_put(np);
	}

	return 1;
}
__initcall(init_ras_IRQ);

/*
 * Handle power subsystem events (EPOW).
 *
 * Presently we just log that the event has occurred.  This should be fixed
 * to examine the type of power failure and take appropriate action where
 * the time horizon permits something useful to be done.
 */
static irqreturn_t
ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	int status = 0xdeadbeef;
	int state = 0;
	int critical;

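	/* Query the EPOW sensor to see how serious the power event is.
	 * States above 3 are treated as time critical, and that
	 * criticality is passed on to the check-exception call below.
	 */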
	status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
			   EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);

	if (state > 3)
		critical = 1;		/* Time Critical */
	else
		critical = 0;

	spin_lock(&ras_log_buf_lock);

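	/* Have RTAS check-exception copy the extended error log for this
	 * interrupt into ras_log_buf so it can be printed and logged below.
	 */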
	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
			   RAS_VECTOR_OFFSET,
			   virt_irq_to_real(irq_offset_down(irq)),
			   RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
			   critical, __pa(&ras_log_buf),
			   rtas_get_error_log_max());

	udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
		    *((unsigned long *)&ras_log_buf), status, state);
	printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
	       *((unsigned long *)&ras_log_buf), status, state);

	/* format and print the extended information */
	log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);

	spin_unlock(&ras_log_buf_lock);
	return IRQ_HANDLED;
}

/*
 * Handle hardware error interrupts.
 *
 * RTAS check-exception is called to collect data on the exception.  If
 * the error is deemed recoverable, we log a warning and return.
 * For nonrecoverable errors, an error is logged and we stop all processing
 * as quickly as possible in order to prevent propagation of the failure.
 */
static irqreturn_t
ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct rtas_error_log *rtas_elog;
	int status = 0xdeadbeef;
	int fatal;

	spin_lock(&ras_log_buf_lock);

	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
			   RAS_VECTOR_OFFSET,
			   virt_irq_to_real(irq_offset_down(irq)),
			   RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
			   __pa(&ras_log_buf),
			   rtas_get_error_log_max());

	rtas_elog = (struct rtas_error_log *)ras_log_buf;

	if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC))
		fatal = 1;
	else
		fatal = 0;

	/* format and print the extended information */
	log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);

	if (fatal) {
		udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
			    *((unsigned long *)&ras_log_buf), status);
		printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
		       *((unsigned long *)&ras_log_buf), status);

#ifndef DEBUG
		/* Don't actually power off when debugging so we can test
		 * without actually failing while injecting errors.
		 * Error data will not be logged to syslog.
		 */
		ppc_md.power_off();
#endif
	} else {
		udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
			    *((unsigned long *)&ras_log_buf), status);
		printk(KERN_WARNING
		       "Warning: Recoverable hardware error <0x%lx 0x%x>\n",
		       *((unsigned long *)&ras_log_buf), status);
	}

	spin_unlock(&ras_log_buf_lock);
	return IRQ_HANDLED;
}

/* Get the error information for errors coming through the
 * FWNMI vectors.  The pt_regs' r3 will be updated to reflect
 * the actual r3 if possible, and a ptr to the error log entry
 * will be returned if found.
 *
 * The mce_data_buf does not have any locks or protection around it;
 * if a second machine check comes in, or a system reset is done
 * before we have logged the error, then we will get corruption in the
 * error log.  This is preferable to holding off on calling
 * ibm,nmi-interlock, which would result in us checkstopping if a
 * second machine check did come in.
 */
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
	unsigned long errdata = regs->gpr[3];
	struct rtas_error_log *errhdr = NULL;
	unsigned long *savep;

	if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
	    (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
		savep = __va(errdata);
		regs->gpr[3] = savep[0];	/* restore original r3 */
		memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
		memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
		errhdr = (struct rtas_error_log *)mce_data_buf;
	} else {
		printk("FWNMI: corrupt r3\n");
	}
	return errhdr;
}

/* Call this when done with the data returned by fwnmi_get_errinfo().
 * It will release the saved data area for other CPUs in the
 * partition to receive FWNMI errors.
 */
static void fwnmi_release_errinfo(void)
{
	int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
	if (ret != 0)
		printk("FWNMI: nmi-interlock failed: %d\n", ret);
}

void pSeries_system_reset_exception(struct pt_regs *regs)
{
	if (fwnmi_active) {
		struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
		if (errhdr) {
			/* XXX Should look at FWNMI information */
		}
		fwnmi_release_errinfo();
	}
}

/*
 * See if we can recover from a machine check exception.
 * This is only called on power4 (or above) and only via
 * the Firmware Non-Maskable Interrupts (fwnmi) handler
 * which provides the error analysis for us.
 *
 * Return 1 if corrected (or delivered a signal).
 * Return 0 if there is nothing we can do.
 */
static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
{
	int nonfatal = 0;

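	/* A fully recovered error needs no further action.  The only other
	 * case we try to contain is a synchronous, uncorrected memory (ECC)
	 * error that hit a user process other than init or the idle task;
	 * that process is killed with SIGBUS and the system keeps running.
	 */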
	if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
		/* Platform corrected itself */
		nonfatal = 1;
	} else if ((regs->msr & MSR_RI) &&
		   user_mode(regs) &&
		   err->severity == RTAS_SEVERITY_ERROR_SYNC &&
		   err->disposition == RTAS_DISP_NOT_RECOVERED &&
		   err->target == RTAS_TARGET_MEMORY &&
		   err->type == RTAS_TYPE_ECC_UNCORR &&
		   !(current->pid == 0 || current->pid == 1)) {
		/* Kill off a user process with an ECC error */
		printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
		       current->pid);
		/* XXX something better for ECC error? */
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		nonfatal = 1;
	}

	log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);

	return nonfatal;
}

/*
 * Handle a machine check.
 *
 * Note that on Power 4 and beyond, Firmware Non-Maskable Interrupts (fwnmi)
 * should be present.  If so, the handler which called us tells us if the
 * error was recovered (never true if RI=0).
 *
 * On hardware prior to Power 4 these exceptions were asynchronous, which
 * means we can't tell exactly where they occurred and so we can't recover.
 */
int pSeries_machine_check_exception(struct pt_regs *regs)
{
	struct rtas_error_log *errp;

	if (fwnmi_active) {
		errp = fwnmi_get_errinfo(regs);
		fwnmi_release_errinfo();
		if (errp && recover_mce(regs, errp))
			return 1;
	}

	return 0;
}