author    Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>    2008-04-17 04:00:37 -0400
committer Tony Luck <tony.luck@intel.com>                   2008-04-22 11:56:38 -0400
commit    4fa2f0e672ba16b55a34ecfa514ccd92e226d3d4
tree      c52bc8c86ac909c281e0362e8ff29f54ea4ca6ed
parent    284e54279597e0933d785580a43be0b0194dfa00
[IA64] simplify notify hooks in mca.c
There are many notify_die() calls, and almost all of them follow the same
pattern together with ia64_mca_spin(). This patch defines macros and replaces
them all, to reduce lines and improve readability.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
 arch/ia64/kernel/crash.c |  4
 arch/ia64/kernel/mca.c   | 74
 include/asm-ia64/mca.h   |  1
 3 files changed, 38 insertions(+), 41 deletions(-)
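For reference, the new NOTIFY_MCA()/NOTIFY_INIT() helpers added below rely on
the common do { ... } while (0) macro idiom, which makes a multi-statement
macro expand to exactly one statement so it stays safe in an unbraced if/else.
The standalone sketch that follows only illustrates that idiom; fake_notify(),
fake_spin() and NOTIFY() are hypothetical stand-ins, not part of this patch.

#include <stdio.h>

/* Hypothetical stand-ins for notify_die()/ia64_mca_spin(): fake_notify()
 * returns nonzero when the handler chain asks the caller to stop (and spin). */
static int fake_notify(const char *event) { return event[0] == 'X'; }
static void fake_spin(const char *func)   { printf("%s: spinning\n", func); }

/* Same shape as the patch's NOTIFY_MCA/NOTIFY_INIT: the do { } while (0)
 * wrapper makes the whole body expand to a single statement. */
#define NOTIFY(event, spin)                                     \
do {                                                            \
        if (fake_notify(event) && ((spin) == 1))                \
                fake_spin(__func__);                            \
} while (0)

int main(void)
{
        int fatal = 1;

        if (fatal)
                NOTIFY("Xfatal", 1);    /* expands safely as one statement */
        else
                NOTIFY("recovered", 0);

        return 0;
}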
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 90ef338cf46f..f065093f8e9b 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -194,8 +194,8 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
                unw_init_running(kdump_cpu_freeze, NULL);
                break;
        case DIE_MCA_MONARCH_LEAVE:
-               /* die_register->signr indicate if MCA is recoverable */
-               if (kdump_on_fatal_mca && !args->signr) {
+               /* *(nd->data) indicate if MCA is recoverable */
+               if (kdump_on_fatal_mca && !(*(nd->data))) {
                        atomic_set(&kdump_in_progress, 1);
                        *(nd->monarch_cpu) = -1;
                        machine_kdump_on_init();
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 3ba091bb20c9..705176b434b3 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -109,6 +109,20 @@
 # define IA64_MCA_DEBUG(fmt...)
 #endif
 
+#define NOTIFY_INIT(event, regs, arg, spin)                     \
+do {                                                            \
+       if ((notify_die((event), "INIT", (regs), (arg), 0, 0)    \
+                       == NOTIFY_STOP) && ((spin) == 1))        \
+               ia64_mca_spin(__func__);                         \
+} while (0)
+
+#define NOTIFY_MCA(event, regs, arg, spin)                      \
+do {                                                            \
+       if ((notify_die((event), "MCA", (regs), (arg), 0, 0)     \
+                       == NOTIFY_STOP) && ((spin) == 1))        \
+               ia64_mca_spin(__func__);                         \
+} while (0)
+
 /* Used by mca_asm.S */
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
@@ -766,9 +780,8 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
        /* Mask all interrupts */
        local_irq_save(flags);
-       if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
-                      (long)&nd, 0, 0) == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+
+       NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
        /* Register with the SAL monarch that the slave has
@@ -776,17 +789,13 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
         */
        ia64_sal_mc_rendez();
 
-       if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
-                      (long)&nd, 0, 0) == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
 
        /* Wait for the monarch cpu to exit. */
        while (monarch_cpu != -1)
               cpu_relax();     /* spin until monarch leaves */
 
-       if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
-                      (long)&nd, 0, 0) == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
 
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
        /* Enable all interrupts */
@@ -1256,7 +1265,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
        int recover, cpu = smp_processor_id();
        struct task_struct *previous_current;
        struct ia64_mca_notify_die nd =
-               { .sos = sos, .monarch_cpu = &monarch_cpu };
+               { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
        static atomic_t mca_count;
        static cpumask_t mca_cpu;
 
@@ -1272,9 +1281,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
        previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 
-       if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
 
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
        if (sos->monarch) {
@@ -1293,9 +1300,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                        cpu_relax();    /* spin until monarch wakes us */
        }
 
-       if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
 
        /* Get the MCA error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1321,9 +1326,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
                mca_insert_tr(0x2); /*Reload dynamic itrs*/
        }
 
-       if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
 
        if (atomic_dec_return(&mca_count) > 0) {
                int i;
@@ -1644,7 +1647,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
        struct ia64_mca_notify_die nd =
                { .sos = sos, .monarch_cpu = &monarch_cpu };
 
-       (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+       NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
 
        mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
                sos->proc_state_param, cpu, sos->monarch);
@@ -1681,17 +1684,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
                while (monarch_cpu == -1)
                       cpu_relax();     /* spin until monarch enters */
-               if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
-                               == NOTIFY_STOP)
-                       ia64_mca_spin(__func__);
-               if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-                               == NOTIFY_STOP)
-                       ia64_mca_spin(__func__);
+
+               NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
+               NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+
                while (monarch_cpu != -1)
                       cpu_relax();     /* spin until monarch leaves */
-               if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-                               == NOTIFY_STOP)
-                       ia64_mca_spin(__func__);
+
+               NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
+
                mprintk("Slave on cpu %d returning to normal service.\n", cpu);
                set_curr_task(cpu, previous_current);
                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1700,9 +1701,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
        }
 
        monarch_cpu = cpu;
-       if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
 
        /*
         * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1717,12 +1716,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
         * to default_monarch_init_process() above and just print all the
         * tasks.
         */
-       if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
-       if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
-                       == NOTIFY_STOP)
-               ia64_mca_spin(__func__);
+       NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
+       NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
+
        mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
        atomic_dec(&monarchs);
        set_curr_task(cpu, previous_current);
@@ -1954,7 +1950,7 @@ ia64_mca_init(void)
                        printk(KERN_INFO "Increasing MCA rendezvous timeout from "
                                "%ld to %ld milliseconds\n", timeout, isrv.v0);
                        timeout = isrv.v0;
-                       (void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
+                       NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
                        continue;
                }
                printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index f1663aa94a52..18a4321349a3 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -157,6 +157,7 @@ extern void ia64_mca_printk(const char * fmt, ...)
 struct ia64_mca_notify_die {
        struct ia64_sal_os_state *sos;
        int *monarch_cpu;
+       int *data;
 };
 
 DECLARE_PER_CPU(u64, ia64_mca_pal_base);