author		Corey Minyard <cminyard@mvista.com>	2008-04-29 04:01:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 11:06:14 -0400
commit		bda4c30aa6f7dc1483f39ea1dfe37bcab8a96207 (patch)
tree		760c538139c5b41ef54a27d62e5a8a1b01cf1c60
parent		4ea18425436e7c72716b7f8d314775f399821195 (diff)
ipmi: run to completion fixes
The "run_to_completion" mode was somewhat broken.  Locks need to be
avoided in run_to_completion mode, and it shouldn't be used by normal
users, just internally for panic situations.  This patch removes locks
in run_to_completion mode and removes the user call for setting the
mode.  The only user was the poweroff code, but it was easily converted
to use the polling interface.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	drivers/char/ipmi/ipmi_msghandler.c	 8
-rw-r--r--	drivers/char/ipmi/ipmi_poweroff.c	20
-rw-r--r--	drivers/char/ipmi/ipmi_si_intf.c	43
-rw-r--r--	include/linux/ipmi.h			11
4 files changed, 37 insertions, 45 deletions
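For orientation before the hunks: the poweroff code is converted from run-to-completion mode to explicit polling. Roughly, ipmi_request_in_rc_mode() in ipmi_poweroff.c ends up looking like the condensed sketch below, assembled from the hunks that follow rather than quoted verbatim from the file. The two statically allocated messages (halt_smi_msg and halt_recv_msg, already defined in ipmi_poweroff.c) share an atomic counter that their free routines decrement, and the caller spins on ipmi_poll_interface() until both have been handed back.

	/* Sketch of the converted path; halt_smi_msg and halt_recv_msg are the
	   statically allocated messages already present in ipmi_poweroff.c, and
	   their free routines are wired in via the message initializers. */
	static atomic_t dummy_count = ATOMIC_INIT(0);

	static void dummy_smi_free(struct ipmi_smi_msg *msg)
	{
		atomic_dec(&dummy_count);
	}

	static void dummy_recv_free(struct ipmi_recv_msg *msg)
	{
		atomic_dec(&dummy_count);
	}

	/* Wait for message to complete, spinning. */
	static int ipmi_request_in_rc_mode(ipmi_user_t user,
					   struct ipmi_addr *addr,
					   struct kernel_ipmi_msg *send_msg)
	{
		int rv;

		/* Two messages (SMI + receive) will be handed back to us. */
		atomic_set(&dummy_count, 2);
		rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
					      &halt_smi_msg, &halt_recv_msg, 0);
		if (rv) {
			atomic_set(&dummy_count, 0);
			return rv;
		}

		/* Spin, driving the interface by hand, until both free
		   routines have run. */
		while (atomic_read(&dummy_count) > 0) {
			ipmi_poll_interface(user);
			cpu_relax();
		}

		return halt_recv_msg.msg.data[0];
	}
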
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 32b2b22996dc..9f0075ca34ba 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1197,13 +1197,6 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
 	return rv;
 }
 
-void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
-{
-	ipmi_smi_t intf = user->intf;
-	if (intf->handlers)
-		intf->handlers->set_run_to_completion(intf->send_info, val);
-}
-
 static unsigned char
 ipmb_checksum(unsigned char *data, int size)
 {
@@ -4190,5 +4183,4 @@ EXPORT_SYMBOL(ipmi_get_my_address);
 EXPORT_SYMBOL(ipmi_set_my_LUN);
 EXPORT_SYMBOL(ipmi_get_my_LUN);
 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
-EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
 EXPORT_SYMBOL(ipmi_free_recv_msg);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index b86186de7f07..b065a53d1ca8 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -99,11 +99,14 @@ static unsigned char ipmi_version;
    allocate them, since we may be in a panic situation.  The whole
    thing is single-threaded, anyway, so multiple messages are not
    required. */
+static atomic_t dummy_count = ATOMIC_INIT(0);
 static void dummy_smi_free(struct ipmi_smi_msg *msg)
 {
+	atomic_dec(&dummy_count);
 }
 static void dummy_recv_free(struct ipmi_recv_msg *msg)
 {
+	atomic_dec(&dummy_count);
 }
 static struct ipmi_smi_msg halt_smi_msg =
 {
@@ -152,17 +155,28 @@ static int ipmi_request_wait_for_response(ipmi_user_t user,
 	return halt_recv_msg.msg.data[0];
 }
 
-/* We are in run-to-completion mode, no completion is desired. */
+/* Wait for message to complete, spinning. */
 static int ipmi_request_in_rc_mode(ipmi_user_t user,
 				   struct ipmi_addr *addr,
 				   struct kernel_ipmi_msg *send_msg)
 {
 	int rv;
 
+	atomic_set(&dummy_count, 2);
 	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
 				      &halt_smi_msg, &halt_recv_msg, 0);
-	if (rv)
+	if (rv) {
+		atomic_set(&dummy_count, 0);
 		return rv;
+	}
+
+	/*
+	 * Spin until our message is done.
+	 */
+	while (atomic_read(&dummy_count) > 0) {
+		ipmi_poll_interface(user);
+		cpu_relax();
+	}
 
 	return halt_recv_msg.msg.data[0];
 }
@@ -531,9 +545,7 @@ static void ipmi_poweroff_function (void)
 		return;
 
 	/* Use run-to-completion mode, since interrupts may be off. */
-	ipmi_user_set_run_to_completion(ipmi_user, 1);
 	specific_poweroff_func(ipmi_user);
-	ipmi_user_set_run_to_completion(ipmi_user, 0);
 }
 
 /* Wait for an IPMI interface to be installed, the first one installed
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1a8c1ca90557..30f535657342 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -806,56 +806,53 @@ static void sender(void *send_info,
 		return;
 	}
 
-	spin_lock_irqsave(&(smi_info->msg_lock), flags);
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
 	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
 	if (smi_info->run_to_completion) {
-		/* If we are running to completion, then throw it in
-		   the list and run transactions until everything is
-		   clear.  Priority doesn't matter here. */
+		/*
+		 * If we are running to completion, then throw it in
+		 * the list and run transactions until everything is
+		 * clear.  Priority doesn't matter here.
+		 */
+
+		/*
+		 * Run to completion means we are single-threaded, no
+		 * need for locks.
+		 */
 		list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
 
-		/* We have to release the msg lock and claim the smi
-		   lock in this case, because of race conditions. */
-		spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
-
-		spin_lock_irqsave(&(smi_info->si_lock), flags);
 		result = smi_event_handler(smi_info, 0);
 		while (result != SI_SM_IDLE) {
 			udelay(SI_SHORT_TIMEOUT_USEC);
 			result = smi_event_handler(smi_info,
 						   SI_SHORT_TIMEOUT_USEC);
 		}
-		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 		return;
-	} else {
-		if (priority > 0) {
-			list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
-		} else {
-			list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
-		}
 	}
-	spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
 
-	spin_lock_irqsave(&(smi_info->si_lock), flags);
+	spin_lock_irqsave(&smi_info->msg_lock, flags);
+	if (priority > 0)
+		list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
+	else
+		list_add_tail(&msg->link, &smi_info->xmit_msgs);
+	spin_unlock_irqrestore(&smi_info->msg_lock, flags);
+
+	spin_lock_irqsave(&smi_info->si_lock, flags);
 	if ((smi_info->si_state == SI_NORMAL)
 	    && (smi_info->curr_msg == NULL))
 	{
 		start_next_msg(smi_info);
 	}
-	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
 static void set_run_to_completion(void *send_info, int i_run_to_completion)
 {
 	struct smi_info *smi_info = send_info;
 	enum si_sm_result result;
-	unsigned long     flags;
-
-	spin_lock_irqsave(&(smi_info->si_lock), flags);
 
 	smi_info->run_to_completion = i_run_to_completion;
 	if (i_run_to_completion) {
@@ -866,8 +863,6 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
 						   SI_SHORT_TIMEOUT_USEC);
 		}
 	}
-
-	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
 
 static int ipmi_thread(void *data)
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index c5bd28b69aec..1144b32f5310 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -368,9 +368,8 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
  * Poll the IPMI interface for the user.  This causes the IPMI code to
  * do an immediate check for information from the driver and handle
  * anything that is immediately pending.  This will not block in any
- * way.  This is useful if you need to implement polling from the user
- * for things like modifying the watchdog timeout when a panic occurs
- * or disabling the watchdog timer on a reboot.
+ * way.  This is useful if you need to spin waiting for something to
+ * happen in the IPMI driver.
  */
 void ipmi_poll_interface(ipmi_user_t user);
 
@@ -422,12 +421,6 @@ int ipmi_get_maintenance_mode(ipmi_user_t user);
 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
 
 /*
- * Allow run-to-completion mode to be set for the interface of
- * a specific user.
- */
-void ipmi_user_set_run_to_completion(ipmi_user_t user, int val);
-
-/*
  * When the user is created, it will not receive IPMI events by
  * default.  The user must set this to TRUE to get incoming events.
  * The first user that sets this to TRUE will receive all events that