Diffstat (limited to 'drivers/char/ipmi/ipmi_si_intf.c')
 -rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c  74
 1 file changed, 3 insertions(+), 71 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 02a7dd7a8a55..f57eba0bf253 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -38,7 +38,6 @@
  * and drives the real SMI state machine.
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <asm/system.h>
@@ -55,23 +54,6 @@
 #include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <asm/irq.h>
-#ifdef CONFIG_HIGH_RES_TIMERS
-#include <linux/hrtime.h>
-# if defined(schedule_next_int)
-/* Old high-res timer code, do translations. */
-# define get_arch_cycles(a) quick_update_jiffies_sub(a)
-# define arch_cycles_per_jiffy cycles_per_jiffies
-# endif
-static inline void add_usec_to_timer(struct timer_list *t, long v)
-{
-        t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
-        while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
-        {
-                t->expires++;
-                t->arch_cycle_expires -= arch_cycles_per_jiffy;
-        }
-}
-#endif
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
 #include <linux/ipmi_smi.h>
@@ -243,8 +225,6 @@ static int register_xaction_notifier(struct notifier_block * nb)
 	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 }
 
-static void si_restart_short_timer(struct smi_info *smi_info);
-
 static void deliver_recv_msg(struct smi_info *smi_info,
                              struct ipmi_smi_msg *msg)
 {
@@ -768,7 +748,6 @@ static void sender(void *send_info,
             && (smi_info->curr_msg == NULL))
         {
                 start_next_msg(smi_info);
-                si_restart_short_timer(smi_info);
         }
         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
@@ -809,7 +788,7 @@ static int ipmi_thread(void *data)
                         /* do nothing */
                 }
                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
-                        udelay(1);
+                        schedule();
                 else
                         schedule_timeout_interruptible(1);
         }
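
The hunk above makes the polling kthread yield the CPU instead of busy-waiting in udelay(1) when the state machine asks for a short delay. A minimal sketch of the resulting loop shape, for reference only; the smi_event_handler() call and the si_lock locking are assumed from the surrounding driver code rather than shown in this hunk:

        /* Poll the SMI state machine from a dedicated kernel thread. */
        while (!kthread_should_stop()) {
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);

                if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
                        continue;                /* more work is ready right now */
                else if (smi_result == SI_SM_CALL_WITH_DELAY)
                        schedule();              /* yield; was a udelay(1) busy-wait */
                else
                        schedule_timeout_interruptible(1);  /* idle: sleep a jiffy */
        }
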
@@ -833,37 +812,6 @@ static void request_events(void *send_info)
 
 static int initialized = 0;
 
-/* Must be called with interrupts off and with the si_lock held. */
-static void si_restart_short_timer(struct smi_info *smi_info)
-{
-#if defined(CONFIG_HIGH_RES_TIMERS)
-        unsigned long flags;
-        unsigned long jiffies_now;
-        unsigned long seq;
-
-        if (del_timer(&(smi_info->si_timer))) {
-                /* If we don't delete the timer, then it will go off
-                   immediately, anyway. So we only process if we
-                   actually delete the timer. */
-
-                do {
-                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
-                        jiffies_now = jiffies;
-                        smi_info->si_timer.expires = jiffies_now;
-                        smi_info->si_timer.arch_cycle_expires
-                                = get_arch_cycles(jiffies_now);
-                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
-                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
-
-                add_timer(&(smi_info->si_timer));
-                spin_lock_irqsave(&smi_info->count_lock, flags);
-                smi_info->timeout_restarts++;
-                spin_unlock_irqrestore(&smi_info->count_lock, flags);
-        }
-#endif
-}
-
 static void smi_timeout(unsigned long data)
 {
         struct smi_info *smi_info = (struct smi_info *) data;
@@ -904,31 +852,15 @@ static void smi_timeout(unsigned long data)
         /* If the state machine asks for a short delay, then shorten
            the timer timeout. */
         if (smi_result == SI_SM_CALL_WITH_DELAY) {
-#if defined(CONFIG_HIGH_RES_TIMERS)
-                unsigned long seq;
-#endif
                 spin_lock_irqsave(&smi_info->count_lock, flags);
                 smi_info->short_timeouts++;
                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
-#if defined(CONFIG_HIGH_RES_TIMERS)
-                do {
-                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
-                        smi_info->si_timer.expires = jiffies;
-                        smi_info->si_timer.arch_cycle_expires
-                                = get_arch_cycles(smi_info->si_timer.expires);
-                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
-#else
                 smi_info->si_timer.expires = jiffies + 1;
-#endif
         } else {
                 spin_lock_irqsave(&smi_info->count_lock, flags);
                 smi_info->long_timeouts++;
                 spin_unlock_irqrestore(&smi_info->count_lock, flags);
                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
-#if defined(CONFIG_HIGH_RES_TIMERS)
-                smi_info->si_timer.arch_cycle_expires = 0;
-#endif
         }
 
 do_add_timer:
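
With the CONFIG_HIGH_RES_TIMERS branches removed, the re-arm logic in smi_timeout() reduces to plain jiffies arithmetic. A minimal sketch of the resulting path, with the count_lock statistics omitted; the add_timer() re-arm at the do_add_timer label is assumed from the surrounding driver code, not shown in the hunk:

        if (smi_result == SI_SM_CALL_WITH_DELAY) {
                /* State machine wants another look soon: fire on the next tick. */
                smi_info->si_timer.expires = jiffies + 1;
        } else {
                /* Nothing pending: fall back to the slow poll interval. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
        }

 do_add_timer:
        add_timer(&(smi_info->si_timer));
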
@@ -1109,7 +1041,7 @@ static int std_irq_setup(struct smi_info *info)
         if (info->si_type == SI_BT) {
                 rv = request_irq(info->irq,
                                  si_bt_irq_handler,
-                                 SA_INTERRUPT,
+                                 IRQF_DISABLED,
                                  DEVICE_NAME,
                                  info);
                 if (!rv)
@@ -1119,7 +1051,7 @@ static int std_irq_setup(struct smi_info *info)
         } else
                 rv = request_irq(info->irq,
                                  si_irq_handler,
-                                 SA_INTERRUPT,
+                                 IRQF_DISABLED,
                                  DEVICE_NAME,
                                  info);
         if (rv) {
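
SA_INTERRUPT is the older name for what is now IRQF_DISABLED (run the handler with local interrupts disabled), so only the flag name changes in these two hunks. A minimal sketch of the registration call with the new flag, reusing the handler and dev_id cookie shown above; the error-handling line is illustrative, not the driver's exact code:

        /* Claim the SMI interrupt.  IRQF_DISABLED (formerly SA_INTERRUPT)
           runs the handler with local interrupts disabled. */
        rv = request_irq(info->irq,       /* IRQ number from the interface info  */
                         si_irq_handler,  /* handler, as in the hunk above       */
                         IRQF_DISABLED,   /* flags: old SA_INTERRUPT semantics   */
                         DEVICE_NAME,     /* name shown in /proc/interrupts      */
                         info);           /* dev_id, passed back to the handler
                                             and required again by free_irq()    */
        if (rv)
                return rv;                /* could not claim the IRQ */
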