author		David S. Miller <davem@davemloft.net>	2008-03-18 02:44:31 -0400
committer	David S. Miller <davem@davemloft.net>	2008-03-18 02:44:31 -0400
commit		2f633928cbba8a5858bb39b11e7219a41b0fbef5 (patch)
tree		9a82f4b7f2c3afe4b0208d8e44ea61bae90a7d22	/arch/ia64/kernel/mca.c
parent		5e226e4d9016daee170699f8a4188a5505021756 (diff)
parent		bde4f8fa8db2abd5ac9c542d76012d0fedab050f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/ia64/kernel/mca.c')
-rw-r--r--	arch/ia64/kernel/mca.c	73
1 file changed, 35 insertions(+), 38 deletions(-)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6e17aed53135..6c18221dba36 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
 		IA64_LOG_INDEX_INC(sal_info_type);
 		IA64_LOG_UNLOCK(sal_info_type);
 		if (irq_safe) {
-			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
-				"Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
+				__func__, sal_info_type, total_len);
 		}
 		*buffer = (u8 *) log_buffer;
 		return total_len;
@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
 	static DEFINE_SPINLOCK(cpe_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cpe_irq, smp_processor_id());
+		       __func__, cpe_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev)
 	}
 
 	IA64_MCA_DEBUG("%s: corrected platform error "
-		       "vector %#x registered\n", __FUNCTION__, cpev);
+		       "vector %#x registered\n", __func__, cpev);
 }
 #endif /* CONFIG_ACPI */
 
@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void)
 	cmcv.cmcv_vector = IA64_CMC_VECTOR;
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x registered.\n",
-		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
+		       __func__, smp_processor_id(), IA64_CMC_VECTOR);
 
 	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
-		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
+		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 }
 
 /*
@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy)
 	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x disabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy)
 	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
 	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
-	IA64_MCA_DEBUG("%s: CPU %d corrected "
-		       "machine check vector %#x enabled.\n",
-		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
+		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 }
 
 /*
@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 	local_irq_save(flags);
 	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 	/* Register with the SAL monarch that the slave has
@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/* Wait for the monarch cpu to exit. */
 	while (monarch_cpu != -1)
@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 
 	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(),
 		       (long)&nd, 0, 0) == NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mca_wakeup_all();
 		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 	} else {
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax(); /* spin until monarch wakes us */
@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 
 	if (atomic_dec_return(&mca_count) > 0) {
@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
 	static DEFINE_SPINLOCK(cmc_history_lock);
 
 	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
-		       __FUNCTION__, cmc_irq, smp_processor_id());
+		       __func__, cmc_irq, smp_processor_id());
 
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
 		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&slaves);
 		sos->monarch = 1;
 	}
@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
 		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
-			__FUNCTION__, cpu);
+			__func__, cpu);
 		atomic_dec(&monarchs);
 		sos->monarch = 0;
 	}
@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 			cpu_relax(); /* spin until monarch enters */
 		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		while (monarch_cpu != -1)
 			cpu_relax(); /* spin until monarch leaves */
 		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
-			ia64_mca_spin(__FUNCTION__);
+			ia64_mca_spin(__func__);
 		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = cpu;
 	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 
 	/*
 	 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 */
 	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
+		ia64_mca_spin(__func__);
 	mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
@@ -1884,7 +1881,7 @@ ia64_mca_init(void)
 		.priority = 0/* we need to notified last */
 	};
 
-	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: begin\n", __func__);
 
 	/* Clear the Rendez checkin flag for all cpus */
 	for(i = 0 ; i < NR_CPUS; i++)
@@ -1928,7 +1925,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
 
 	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
 	/*
@@ -1949,7 +1946,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
 		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
 
 	/*
@@ -1961,7 +1958,7 @@ ia64_mca_init(void)
 	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
 	ia64_mc_info.imi_slave_init_handler_size = 0;
 
-	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
+	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
 		       ia64_mc_info.imi_monarch_init_handler);
 
 	/* Register the os init handler with SAL */
@@ -1982,7 +1979,7 @@ ia64_mca_init(void)
 		return;
 	}
 
-	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
 
 	/*
 	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void)
 	cmc_polling_enabled = 0;
 	schedule_work(&cmc_enable_work);
 
-	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
+	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
 
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void)
 				ia64_cpe_irq = irq;
 				ia64_mca_register_cpev(cpe_vector);
 				IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
-					__FUNCTION__);
+					__func__);
 				return 0;
 			}
 			printk(KERN_ERR "%s: Failed to find irq for CPE "
 					"interrupt handler, vector %d\n",
-					__FUNCTION__, cpe_vector);
+					__func__, cpe_vector);
 		}
 		/* If platform doesn't support CPEI, get the timer going. */
 		if (cpe_poll_enabled) {
 			ia64_mca_cpe_poll(0UL);
-			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
 		}
 	}
 #endif
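
Note (editorial, not part of the patch): every hunk above makes the same mechanical substitution, replacing the GCC-specific __FUNCTION__ identifier with __func__, the predefined identifier standardized by C99, while leaving the logged text unchanged; a few format strings that were split across lines are also joined onto one line. Below is a minimal user-space sketch of the same substitution; MCA_DEBUG() and register_vector() are hypothetical stand-ins for the kernel's IA64_MCA_DEBUG() wrapper and its callers, not code from mca.c.

#include <stdio.h>

/*
 * MCA_DEBUG() is a made-up stand-in for the kernel's IA64_MCA_DEBUG()
 * printk wrapper.  __func__ is the C99 predefined identifier holding the
 * enclosing function's name; __FUNCTION__ is the older GCC spelling that
 * this patch retires.
 */
#define MCA_DEBUG(fmt, ...) printf("mca: " fmt, __VA_ARGS__)

static void register_vector(unsigned int vector)
{
	/* __func__ expands to "register_vector" here. */
	MCA_DEBUG("%s: vector %#x registered\n", __func__, vector);
}

int main(void)
{
	register_vector(0x1f);
	return 0;
}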