author		Russ Anderson <rja@sgi.com>	2006-03-24 12:49:52 -0500
committer	Tony Luck <tony.luck@intel.com>	2006-03-24 12:49:52 -0500
commit		d2a28ad9fa7bf16761d070d8a3338375e1574b32 (patch)
tree		9da2c18c91a4ee12fa6a9b2d23dcb55d13dd0ca8 /arch/ia64/kernel/mca.c
parent		a5b00bb4fe60796c791238cf5653b82110031c93 (diff)
[IA64] MCA recovery: kernel context recovery table
Memory errors encountered by user applications may surface when the CPU is running in kernel context. The current code will not attempt recovery if the MCA surfaces in kernel context (privilege mode 0). This patch adds a check for cases where the user initiated the load that surfaces in kernel interrupt code.

An example is a user process launching a load from memory where the data has bad ECC. Before the bad data reaches a CPU register, an interrupt comes in. The code jumps to the IVT interrupt entry point and begins execution in kernel context. The process of saving the user registers (SAVE_REST) causes the bad data to be loaded into a CPU register, triggering the MCA. The MCA surfaces in kernel context, even though the load was initiated from user context.

As suggested by David and Tony, this patch uses an exception-table-like approach, putting the tagged recovery addresses in a searchable table. One difference from the exception table is that MCAs do not surface in precise places (such as with a TLB miss), so instead of tagging specific instructions, address ranges are registered. A single macro is used to do the tagging, with the input parameter being the label of the starting address and the macro invocation itself marking the ending address. This limits clutter in the code.

This patch only tags one spot, the interrupt IVT entry. Testing showed that spot to be a "heavy hitter", with MCAs surfacing while saving user registers. Other spots can be added as needed by adding a single macro.

Signed-off-by: Russ Anderson <rja@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
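Editorial note (not part of the commit): the table entries searched by the new code below store self-relative offsets rather than absolute addresses, so the table needs no relocation. A plausible sketch of the entry layout, matching the arithmetic in search_mca_table(); the comment text and the section mechanics are inferred from this diff, not quoted from the rest of the patch:

	/* Sketch of the table entry implied by search_mca_table() below.
	 * Each field holds (tagged address - address of the field itself),
	 * so entries are position-independent.  The commit's tagging macro
	 * emits one such pair per recoverable range into a dedicated section
	 * that the linker brackets with __start___mca_table/__stop___mca_table,
	 * the symbols mca_recover_range() walks. */
	struct mca_table_entry {
		int start_addr;		/* self-relative offset of range start */
		int end_addr;		/* self-relative offset of range end */
	};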
Diffstat (limited to 'arch/ia64/kernel/mca.c')
-rw-r--r--	arch/ia64/kernel/mca.c	98
1 file changed, 73 insertions(+), 25 deletions(-)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index cedcae713e9f..87ff7fe33cfb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -83,6 +83,7 @@
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 
+#include "mca_drv.h"
 #include "entry.h"
 
 #if defined(IA64_MCA_DEBUG_INFO)
@@ -281,6 +282,50 @@ ia64_mca_log_sal_error_record(int sal_info_type)
 	ia64_sal_clear_state_info(sal_info_type);
 }
 
+/*
+ * search_mca_table
+ *	See if the MCA surfaced in an instruction range
+ *	that has been tagged as recoverable.
+ *
+ * Inputs
+ *	first	First address range to check
+ *	last	Last address range to check
+ *	ip	Instruction pointer, address we are looking for
+ *
+ * Return value:
+ *	1 on Success (in the table) / 0 on Failure (not in the table)
+ */
+int
+search_mca_table (const struct mca_table_entry *first,
+		const struct mca_table_entry *last,
+		unsigned long ip)
+{
+	const struct mca_table_entry *curr;
+	u64 curr_start, curr_end;
+
+	curr = first;
+	while (curr <= last) {
+		curr_start = (u64) &curr->start_addr + curr->start_addr;
+		curr_end = (u64) &curr->end_addr + curr->end_addr;
+
+		if ((ip >= curr_start) && (ip <= curr_end)) {
+			return 1;
+		}
+		curr++;
+	}
+	return 0;
+}
+
+/* Given an address, look for it in the mca tables. */
+int mca_recover_range(unsigned long addr)
+{
+	extern struct mca_table_entry __start___mca_table[];
+	extern struct mca_table_entry __stop___mca_table[];
+
+	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
+}
+EXPORT_SYMBOL_GPL(mca_recover_range);
+
 #ifdef CONFIG_ACPI
 
 int cpe_vector = -1;
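Editorial note on the arithmetic in search_mca_table(): `curr_start = (u64) &curr->start_addr + curr->start_addr` resolves a self-relative field (target address minus the field's own address) back to an absolute address, so the table stays valid wherever the image is placed. A minimal standalone C sketch of the idiom; every name in it is invented for illustration:

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical stand-in for mca_table_entry: each field stores the
	 * distance from the field itself to the address it describes. */
	struct range_entry {
		int32_t start_off;
		int32_t end_off;
	};

	/* Resolve a self-relative field back to an absolute address. */
	static uint64_t resolve(const int32_t *field)
	{
		return (uint64_t)(uintptr_t)field + (int64_t)*field;
	}

	int main(void)
	{
		static char code[64];		/* pretend instruction range */
		static struct range_entry e;

		/* Build the entry the way an assembler macro would ("label - ."):
		 * store target address minus the address of the field. */
		e.start_off = (int32_t)((uintptr_t)&code[0] - (uintptr_t)&e.start_off);
		e.end_off   = (int32_t)((uintptr_t)&code[63] - (uintptr_t)&e.end_off);

		uint64_t ip = (uint64_t)(uintptr_t)&code[10];	/* a faulting "ip" */
		int hit = ip >= resolve(&e.start_off) && ip <= resolve(&e.end_off);
		printf("ip in tagged range: %d\n", hit);	/* prints 1 */
		return 0;
	}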
@@ -747,31 +792,34 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 		ia64_mca_modify_comm(previous_current);
 		goto no_mod;
 	}
-	if (r13 != sos->prev_IA64_KR_CURRENT) {
-		msg = "inconsistent previous current and r13";
-		goto no_mod;
-	}
-	if ((r12 - r13) >= KERNEL_STACK_SIZE) {
-		msg = "inconsistent r12 and r13";
-		goto no_mod;
-	}
-	if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
-		msg = "inconsistent ar.bspstore and r13";
-		goto no_mod;
-	}
-	va.p = old_bspstore;
-	if (va.f.reg < 5) {
-		msg = "old_bspstore is in the wrong region";
-		goto no_mod;
-	}
-	if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
-		msg = "inconsistent ar.bsp and r13";
-		goto no_mod;
-	}
-	size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
-	if (ar_bspstore + size > r12) {
-		msg = "no room for blocked state";
-		goto no_mod;
+
+	if (!mca_recover_range(ms->pmsa_iip)) {
+		if (r13 != sos->prev_IA64_KR_CURRENT) {
+			msg = "inconsistent previous current and r13";
+			goto no_mod;
+		}
+		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
+			msg = "inconsistent r12 and r13";
+			goto no_mod;
+		}
+		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
+			msg = "inconsistent ar.bspstore and r13";
+			goto no_mod;
+		}
+		va.p = old_bspstore;
+		if (va.f.reg < 5) {
+			msg = "old_bspstore is in the wrong region";
+			goto no_mod;
+		}
+		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
+			msg = "inconsistent ar.bsp and r13";
+			goto no_mod;
+		}
+		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
+		if (ar_bspstore + size > r12) {
+			msg = "no room for blocked state";
+			goto no_mod;
+		}
 	}
 
 	ia64_mca_modify_comm(previous_current);
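Editorial note on the hunk above: the new guard wraps, rather than augments, the consistency checks. When ms->pmsa_iip (the interrupted instruction pointer from the MIN-state save area) falls inside a tagged range, the MCA hit while the IVT entry code was still saving user state, so r12, r13, and ar.bspstore may legitimately hold user-context values; running the kernel-stack sanity checks in that window would wrongly abort an MCA that is, per the commit message above, recoverable.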