author     Linus Torvalds <torvalds@linux-foundation.org>   2008-04-18 12:44:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-04-18 12:44:55 -0400
commit     4786b4ee22de6304e841b12ee22b849230d7fba3 (patch)
tree       08793b8fbcd63204d5d3355ac755745adcfef170 /arch/ia64/kernel/mca.c
parent     253ba4e79edc695b2925bd2ef34de06ff4d4070c (diff)
parent     71b264f85ff50c14fe945ffff06ae0d5e9a9124e (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
  [IA64] kdump: Add crash_save_vmcoreinfo for INIT
  [IA64] Fix NUMA configuration issue
  [IA64] Itanium Spec updates
  [IA64] Untangle sync_icache_dcache() page size determination
  [IA64] arch/ia64/kernel/: use time_* macros
  [IA64] remove redundant display of free swap space in show_mem()
  [IA64] make IOMMU respect the segment boundary limits
  [IA64] kprobes: kprobe-booster for ia64
  [IA64] fix getpid and set_tid_address fast system calls for pid namespaces
  [IA64] Replace explicit jiffies tests with time_* macros.
  [IA64] use goto to jump out do/while_each_thread
  [IA64] Fix unlock ordering in smp_callin
  [IA64] pgd_offset() constfication.
  [IA64] kdump: crash.c coding style fix
  [IA64] kdump: add kdump_on_fatal_mca
  [IA64] Minimize per_cpu reservations.
  [IA64] Correct pernodesize calculation.
  [IA64] Kernel parameter for max number of concurrent global TLB purges
  [IA64] Multiple outstanding ptc.g instruction support
  [IA64] Implement smp_call_function_mask for ia64
  ...
Diffstat (limited to 'arch/ia64/kernel/mca.c')
-rw-r--r--  arch/ia64/kernel/mca.c  60
1 files changed, 54 insertions, 6 deletions
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6c18221dba36..e51bced3b0fa 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -69,6 +69,7 @@
  * 2007-04-27 Russ Anderson <rja@sgi.com>
  *             Support multiple cpus going through OS_MCA in the same event.
  */
+#include <linux/jiffies.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -97,6 +98,7 @@
 
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
+#include <asm/tlb.h>
 
 #include "mca_drv.h"
 #include "entry.h"
@@ -112,6 +114,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 DEFINE_PER_CPU(u64, ia64_mca_pal_pte);     /* PTE to map PAL code */
 DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
+DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */
 
 unsigned long __per_cpu_mca[NR_CPUS];
 
@@ -293,7 +296,8 @@ static void ia64_mlogbuf_dump_from_init(void)
         if (mlogbuf_finished)
                 return;
 
-        if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
+        if (mlogbuf_timestamp &&
+            time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
                 printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
                         " and the system seems to be messed up.\n");
                 ia64_mlogbuf_finish(0);
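The hunk above swaps a raw jiffies comparison for time_before(), which keeps the 30-second window check correct when the jiffies counter wraps. Below is a minimal standalone sketch of that difference (plain userspace C, not kernel code): the sketch_* macros mirror the signed-difference trick from include/linux/jiffies.h, and the HZ value of 250 is only an assumption for illustration.

#include <stdio.h>
#include <limits.h>

/* Same signed-difference idea as the kernel's time_after()/time_before(). */
#define sketch_time_after(a, b)   ((long)((b) - (a)) < 0)
#define sketch_time_before(a, b)  sketch_time_after(b, a)

int main(void)
{
        const unsigned long hz = 250;            /* assumed HZ value */
        unsigned long stamp   = ULONG_MAX - 9;   /* timestamp taken near the top of the counter */
        unsigned long now     = stamp + 5;       /* five ticks later, counter not yet wrapped */
        unsigned long timeout = stamp + 30 * hz; /* wraps around to a small value */

        /* The old-style comparison wrongly reports the 30s window as already expired... */
        printf("naive (timeout > now)     : %d\n", timeout > now);            /* prints 0 */
        /* ...while the wraparound-safe form still sees "now" inside the window. */
        printf("time_before(now, timeout) : %d\n",
               sketch_time_before(now, timeout));                             /* prints 1 */
        return 0;
}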
@@ -1182,6 +1186,49 @@ all_in:
         return;
 }
 
+/* mca_insert_tr
+ *
+ *  Switch the region register (rid) while reloading a TR, if needed.
+ *  iord: 1: itr, 2: dtr
+ *
+ */
+static void mca_insert_tr(u64 iord)
+{
+
+        int i;
+        u64 old_rr;
+        struct ia64_tr_entry *p;
+        unsigned long psr;
+        int cpu = smp_processor_id();
+
+        psr = ia64_clear_ic();
+        for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
+                p = &__per_cpu_idtrs[cpu][iord-1][i];
+                if (p->pte & 0x1) {
+                        old_rr = ia64_get_rr(p->ifa);
+                        if (old_rr != p->rr) {
+                                ia64_set_rr(p->ifa, p->rr);
+                                ia64_srlz_d();
+                        }
+                        ia64_ptr(iord, p->ifa, p->itir >> 2);
+                        ia64_srlz_i();
+                        if (iord & 0x1) {
+                                ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
+                                ia64_srlz_i();
+                        }
+                        if (iord & 0x2) {
+                                ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
+                                ia64_srlz_i();
+                        }
+                        if (old_rr != p->rr) {
+                                ia64_set_rr(p->ifa, old_rr);
+                                ia64_srlz_d();
+                        }
+                }
+        }
+        ia64_set_psr(psr);
+}
+
 /*
  * ia64_mca_handler
  *
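mca_insert_tr() pairs with the new per-CPU ia64_mca_tr_reload flag declared earlier in this diff: the flag is presumably raised by the MCA entry path once the dynamic TRs have been purged (that change is outside this file), and ia64_mca_handler() then re-inserts them, as the hunk below shows. A hedged sketch of that handshake from mca.c's point of view only; the setter side is hypothetical here, with tr_purge_needed standing in for whatever condition the real purge path uses.

        /* Setter side (sketch only; the real setter is not part of this file): */
        if (tr_purge_needed)
                __get_cpu_var(ia64_mca_tr_reload) = 1;

        /* Consumer side, as added to ia64_mca_handler() in the hunk below: */
        if (__get_cpu_var(ia64_mca_tr_reload)) {
                mca_insert_tr(0x1);     /* re-insert the dynamic instruction TRs */
                mca_insert_tr(0x2);     /* re-insert the dynamic data TRs */
        }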
@@ -1266,16 +1313,17 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
         } else {
                 /* Dump buffered message to console */
                 ia64_mlogbuf_finish(1);
-#ifdef CONFIG_KEXEC
-                atomic_set(&kdump_in_progress, 1);
-                monarch_cpu = -1;
-#endif
         }
+
+        if (__get_cpu_var(ia64_mca_tr_reload)) {
+                mca_insert_tr(0x1); /* Reload dynamic itrs */
+                mca_insert_tr(0x2); /* Reload dynamic dtrs */
+        }
+
         if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
                         == NOTIFY_STOP)
                 ia64_mca_spin(__func__);
 
-
         if (atomic_dec_return(&mca_count) > 0) {
                 int i;
 