Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  16
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c     69
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c    18
3 files changed, 17 insertions, 86 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 23028559dbc4..ad26f4b997c5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -57,8 +57,7 @@ static int ipmi_init_msghandler(void);
 static int initialized = 0;
 
 #ifdef CONFIG_PROC_FS
-struct proc_dir_entry *proc_ipmi_root = NULL;
-EXPORT_SYMBOL(proc_ipmi_root);
+static struct proc_dir_entry *proc_ipmi_root = NULL;
 #endif /* CONFIG_PROC_FS */
 
 #define MAX_EVENTS_IN_QUEUE 25
@@ -3674,7 +3673,7 @@ static void send_panic_events(char *str)
 }
 #endif /* CONFIG_IPMI_PANIC_EVENT */
 
-static int has_paniced = 0;
+static int has_panicked = 0;
 
 static int panic_event(struct notifier_block *this,
 		       unsigned long event,
@@ -3683,9 +3682,9 @@ static int panic_event(struct notifier_block *this,
 	int        i;
 	ipmi_smi_t intf;
 
-	if (has_paniced)
+	if (has_panicked)
 		return NOTIFY_DONE;
-	has_paniced = 1;
+	has_panicked = 1;
 
 	/* For every registered interface, set it to run to completion. */
 	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
@@ -3739,11 +3738,8 @@ static int ipmi_init_msghandler(void)
 	proc_ipmi_root->owner = THIS_MODULE;
 #endif /* CONFIG_PROC_FS */
 
-	init_timer(&ipmi_timer);
-	ipmi_timer.data = 0;
-	ipmi_timer.function = ipmi_timeout;
-	ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
-	add_timer(&ipmi_timer);
+	setup_timer(&ipmi_timer, ipmi_timeout, 0);
+	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
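For context on the last ipmi_msghandler.c hunk: the open-coded init_timer()/add_timer() sequence collapses into setup_timer() plus mod_timer(). Below is a minimal sketch of the same pattern, not driver code; example_timer and example_timeout are hypothetical names, and jiffies + HZ stands in for the driver's IPMI_TIMEOUT_JIFFIES.

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    /* Hypothetical timer and callback, for illustration only. */
    static struct timer_list example_timer;

    static void example_timeout(unsigned long data)
    {
    	/* periodic work, possibly re-arming the timer, goes here */
    }

    static void old_style_start(void)
    {
    	/* Open-coded sequence of the kind the patch removes. */
    	init_timer(&example_timer);
    	example_timer.data     = 0;
    	example_timer.function = example_timeout;
    	example_timer.expires  = jiffies + HZ;
    	add_timer(&example_timer);
    }

    static void new_style_start(void)
    {
    	/* Equivalent sequence of the kind the patch adds: bind the
    	   callback and data, then arm (or re-arm) the timer. */
    	setup_timer(&example_timer, example_timeout, 0);
    	mod_timer(&example_timer, jiffies + HZ);
    }
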
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 02a7dd7a8a55..bd4f2248b758 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -55,23 +55,6 @@
 #include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <asm/irq.h>
-#ifdef CONFIG_HIGH_RES_TIMERS
-#include <linux/hrtime.h>
-# if defined(schedule_next_int)
-/* Old high-res timer code, do translations. */
-# define get_arch_cycles(a) quick_update_jiffies_sub(a)
-# define arch_cycles_per_jiffy cycles_per_jiffies
-# endif
-static inline void add_usec_to_timer(struct timer_list *t, long v)
-{
-	t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
-	while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
-	{
-		t->expires++;
-		t->arch_cycle_expires -= arch_cycles_per_jiffy;
-	}
-}
-#endif
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
 #include <linux/ipmi_smi.h>
@@ -243,8 +226,6 @@ static int register_xaction_notifier(struct notifier_block * nb)
 	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 }
 
-static void si_restart_short_timer(struct smi_info *smi_info);
-
 static void deliver_recv_msg(struct smi_info *smi_info,
 			     struct ipmi_smi_msg *msg)
 {
@@ -768,7 +749,6 @@ static void sender(void *send_info,
 	    && (smi_info->curr_msg == NULL))
 	{
 		start_next_msg(smi_info);
-		si_restart_short_timer(smi_info);
 	}
 	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
@@ -809,7 +789,7 @@ static int ipmi_thread(void *data)
 			/* do nothing */
 		}
 		else if (smi_result == SI_SM_CALL_WITH_DELAY)
-			udelay(1);
+			schedule();
 		else
 			schedule_timeout_interruptible(1);
 	}
@@ -833,37 +813,6 @@ static void request_events(void *send_info)
 
 static int initialized = 0;
 
-/* Must be called with interrupts off and with the si_lock held. */
-static void si_restart_short_timer(struct smi_info *smi_info)
-{
-#if defined(CONFIG_HIGH_RES_TIMERS)
-	unsigned long flags;
-	unsigned long jiffies_now;
-	unsigned long seq;
-
-	if (del_timer(&(smi_info->si_timer))) {
-		/* If we don't delete the timer, then it will go off
-		   immediately, anyway.  So we only process if we
-		   actually delete the timer. */
-
-		do {
-			seq = read_seqbegin_irqsave(&xtime_lock, flags);
-			jiffies_now = jiffies;
-			smi_info->si_timer.expires = jiffies_now;
-			smi_info->si_timer.arch_cycle_expires
-				= get_arch_cycles(jiffies_now);
-		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
-		add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
-
-		add_timer(&(smi_info->si_timer));
-		spin_lock_irqsave(&smi_info->count_lock, flags);
-		smi_info->timeout_restarts++;
-		spin_unlock_irqrestore(&smi_info->count_lock, flags);
-	}
-#endif
-}
-
 static void smi_timeout(unsigned long data)
 {
 	struct smi_info *smi_info = (struct smi_info *) data;
@@ -904,31 +853,15 @@ static void smi_timeout(unsigned long data)
 	/* If the state machine asks for a short delay, then shorten
 	   the timer timeout. */
 	if (smi_result == SI_SM_CALL_WITH_DELAY) {
-#if defined(CONFIG_HIGH_RES_TIMERS)
-		unsigned long seq;
-#endif
 		spin_lock_irqsave(&smi_info->count_lock, flags);
 		smi_info->short_timeouts++;
 		spin_unlock_irqrestore(&smi_info->count_lock, flags);
-#if defined(CONFIG_HIGH_RES_TIMERS)
-		do {
-			seq = read_seqbegin_irqsave(&xtime_lock, flags);
-			smi_info->si_timer.expires = jiffies;
-			smi_info->si_timer.arch_cycle_expires
-				= get_arch_cycles(smi_info->si_timer.expires);
-		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-		add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
-#else
 		smi_info->si_timer.expires = jiffies + 1;
-#endif
 	} else {
 		spin_lock_irqsave(&smi_info->count_lock, flags);
 		smi_info->long_timeouts++;
 		spin_unlock_irqrestore(&smi_info->count_lock, flags);
 		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
-#if defined(CONFIG_HIGH_RES_TIMERS)
-		smi_info->si_timer.arch_cycle_expires = 0;
-#endif
 	}
 
  do_add_timer:
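For context on the ipmi_si_intf.c hunks: the CONFIG_HIGH_RES_TIMERS fast path is removed outright, and the polling kthread now yields with schedule() instead of busy-waiting in udelay(1) when the state machine requests a short delay. A rough sketch of that loop shape follows; poll_state_machine() and the poll_result values are hypothetical stand-ins, not the driver's identifiers.

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Hypothetical stand-ins for the SMI state machine and its results. */
    enum poll_result { POLL_BUSY, POLL_SHORT_DELAY, POLL_IDLE };

    static enum poll_result poll_state_machine(void *state)
    {
    	return POLL_IDLE;	/* placeholder */
    }

    static int example_poll_thread(void *data)
    {
    	while (!kthread_should_stop()) {
    		enum poll_result r = poll_state_machine(data);

    		if (r == POLL_BUSY) {
    			/* do nothing, drive the state machine again */
    		} else if (r == POLL_SHORT_DELAY)
    			schedule();		/* yield the CPU instead of udelay(1) */
    		else
    			schedule_timeout_interruptible(1);	/* idle: sleep a tick */
    	}
    	return 0;
    }
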
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 8f8867170973..1a0a19c53605 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -949,9 +949,10 @@ static int wdog_reboot_handler(struct notifier_block *this,
 		/* Disable the WDT if we are shutting down. */
 		ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
 		panic_halt_ipmi_set_timeout();
-	} else {
+	} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
 		/* Set a long timer to let the reboot happens, but
-		   reboot if it hangs. */
+		   reboot if it hangs, but only if the watchdog
+		   timer was already running. */
 		timeout = 120;
 		pretimeout = 0;
 		ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
@@ -973,16 +974,17 @@ static int wdog_panic_handler(struct notifier_block *this,
 {
 	static int panic_event_handled = 0;
 
-	/* On a panic, if we have a panic timeout, make sure that the thing
-	   reboots, even if it hangs during that panic. */
-	if (watchdog_user && !panic_event_handled) {
-		/* Make sure the panic doesn't hang, and make sure we
-		   do this only once. */
+	/* On a panic, if we have a panic timeout, make sure to extend
+	   the watchdog timer to a reasonable value to complete the
+	   panic, if the watchdog timer is running. Plus the
+	   pretimeout is meaningless at panic time. */
+	if (watchdog_user && !panic_event_handled &&
+	    ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+		/* Make sure we do this only once. */
 		panic_event_handled = 1;
 
 		timeout = 255;
 		pretimeout = 0;
-		ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
 		panic_halt_ipmi_set_timeout();
 	}
 