aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/char
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-03-28 20:19:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-28 20:19:28 -0400
commit532bfc851a7475fb6a36c1e953aa395798a7cca7 (patch)
treea7892e5a31330dd59f31959efbe9fda1803784fd /drivers/char
parent0195c00244dc2e9f522475868fa278c473ba7339 (diff)
parent8da00edc1069f01c34510fa405dc15d96c090a3f (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge third batch of patches from Andrew Morton: - Some MM stragglers - core SMP library cleanups (on_each_cpu_mask) - Some IPI optimisations - kexec - kdump - IPMI - the radix-tree iterator work - various other misc bits. "That'll do for -rc1. I still have ~10 patches for 3.4, will send those along when they've baked a little more." * emailed from Andrew Morton <akpm@linux-foundation.org>: (35 commits) backlight: fix typo in tosa_lcd.c crc32: add help text for the algorithm select option mm: move hugepage test examples to tools/testing/selftests/vm mm: move slabinfo.c to tools/vm mm: move page-types.c from Documentation to tools/vm selftests/Makefile: make `run_tests' depend on `all' selftests: launch individual selftests from the main Makefile radix-tree: use iterators in find_get_pages* functions radix-tree: rewrite gang lookup using iterator radix-tree: introduce bit-optimized iterator fs/proc/namespaces.c: prevent crash when ns_entries[] is empty nbd: rename the nbd_device variable from lo to nbd pidns: add reboot_pid_ns() to handle the reboot syscall sysctl: use bitmap library functions ipmi: use locks on watchdog timeout set on reboot ipmi: simplify locking ipmi: fix message handling during panics ipmi: use a tasklet for handling received messages ipmi: increase KCS timeouts ipmi: decrease the IPMI message transaction time in interrupt mode ...
Diffstat (limited to 'drivers/char')
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c4
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c242
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c72
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c21
4 files changed, 171 insertions, 168 deletions
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index cf82fedae099..e53fc24c6af3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
118#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH 118#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
119 119
120/* Timeouts in microseconds. */ 120/* Timeouts in microseconds. */
121#define IBF_RETRY_TIMEOUT 1000000 121#define IBF_RETRY_TIMEOUT 5000000
122#define OBF_RETRY_TIMEOUT 1000000 122#define OBF_RETRY_TIMEOUT 5000000
123#define MAX_ERROR_RETRIES 10 123#define MAX_ERROR_RETRIES 10
124#define ERROR0_OBF_WAIT_JIFFIES (2*HZ) 124#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
125 125
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c90e9390b78c..2c29942b1326 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/proc_fs.h> 46#include <linux/proc_fs.h>
47#include <linux/rcupdate.h> 47#include <linux/rcupdate.h>
48#include <linux/interrupt.h>
48 49
49#define PFX "IPMI message handler: " 50#define PFX "IPMI message handler: "
50 51
@@ -52,6 +53,8 @@
52 53
53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 54static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54static int ipmi_init_msghandler(void); 55static int ipmi_init_msghandler(void);
56static void smi_recv_tasklet(unsigned long);
57static void handle_new_recv_msgs(ipmi_smi_t intf);
55 58
56static int initialized; 59static int initialized;
57 60
@@ -354,12 +357,15 @@ struct ipmi_smi {
354 int curr_seq; 357 int curr_seq;
355 358
356 /* 359 /*
357 * Messages that were delayed for some reason (out of memory, 360 * Messages queued for delivery. If delivery fails (out of memory
358 * for instance), will go in here to be processed later in a 361 * for instance), they will stay in here to be processed later in a
359 * periodic timer interrupt. 362 * periodic timer interrupt. The tasklet is for handling received
363 * messages directly from the handler.
360 */ 364 */
361 spinlock_t waiting_msgs_lock; 365 spinlock_t waiting_msgs_lock;
362 struct list_head waiting_msgs; 366 struct list_head waiting_msgs;
367 atomic_t watchdog_pretimeouts_to_deliver;
368 struct tasklet_struct recv_tasklet;
363 369
364 /* 370 /*
365 * The list of command receivers that are registered for commands 371 * The list of command receivers that are registered for commands
@@ -492,6 +498,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
492 struct cmd_rcvr *rcvr, *rcvr2; 498 struct cmd_rcvr *rcvr, *rcvr2;
493 struct list_head list; 499 struct list_head list;
494 500
501 tasklet_kill(&intf->recv_tasklet);
502
495 free_smi_msg_list(&intf->waiting_msgs); 503 free_smi_msg_list(&intf->waiting_msgs);
496 free_recv_msg_list(&intf->waiting_events); 504 free_recv_msg_list(&intf->waiting_events);
497 505
@@ -2785,12 +2793,17 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2785 return; 2793 return;
2786} 2794}
2787 2795
2788void ipmi_poll_interface(ipmi_user_t user) 2796static void ipmi_poll(ipmi_smi_t intf)
2789{ 2797{
2790 ipmi_smi_t intf = user->intf;
2791
2792 if (intf->handlers->poll) 2798 if (intf->handlers->poll)
2793 intf->handlers->poll(intf->send_info); 2799 intf->handlers->poll(intf->send_info);
2800 /* In case something came in */
2801 handle_new_recv_msgs(intf);
2802}
2803
2804void ipmi_poll_interface(ipmi_user_t user)
2805{
2806 ipmi_poll(user->intf);
2794} 2807}
2795EXPORT_SYMBOL(ipmi_poll_interface); 2808EXPORT_SYMBOL(ipmi_poll_interface);
2796 2809
@@ -2859,6 +2872,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2859#endif 2872#endif
2860 spin_lock_init(&intf->waiting_msgs_lock); 2873 spin_lock_init(&intf->waiting_msgs_lock);
2861 INIT_LIST_HEAD(&intf->waiting_msgs); 2874 INIT_LIST_HEAD(&intf->waiting_msgs);
2875 tasklet_init(&intf->recv_tasklet,
2876 smi_recv_tasklet,
2877 (unsigned long) intf);
2878 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
2862 spin_lock_init(&intf->events_lock); 2879 spin_lock_init(&intf->events_lock);
2863 INIT_LIST_HEAD(&intf->waiting_events); 2880 INIT_LIST_HEAD(&intf->waiting_events);
2864 intf->waiting_events_count = 0; 2881 intf->waiting_events_count = 0;
@@ -3621,11 +3638,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
3621} 3638}
3622 3639
3623/* 3640/*
3624 * Handle a new message. Return 1 if the message should be requeued, 3641 * Handle a received message. Return 1 if the message should be requeued,
3625 * 0 if the message should be freed, or -1 if the message should not 3642 * 0 if the message should be freed, or -1 if the message should not
3626 * be freed or requeued. 3643 * be freed or requeued.
3627 */ 3644 */
3628static int handle_new_recv_msg(ipmi_smi_t intf, 3645static int handle_one_recv_msg(ipmi_smi_t intf,
3629 struct ipmi_smi_msg *msg) 3646 struct ipmi_smi_msg *msg)
3630{ 3647{
3631 int requeue; 3648 int requeue;
@@ -3783,12 +3800,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3783 return requeue; 3800 return requeue;
3784} 3801}
3785 3802
3803/*
3804 * If there are messages in the queue or pretimeouts, handle them.
3805 */
3806static void handle_new_recv_msgs(ipmi_smi_t intf)
3807{
3808 struct ipmi_smi_msg *smi_msg;
3809 unsigned long flags = 0;
3810 int rv;
3811 int run_to_completion = intf->run_to_completion;
3812
3813 /* See if any waiting messages need to be processed. */
3814 if (!run_to_completion)
3815 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3816 while (!list_empty(&intf->waiting_msgs)) {
3817 smi_msg = list_entry(intf->waiting_msgs.next,
3818 struct ipmi_smi_msg, link);
3819 list_del(&smi_msg->link);
3820 if (!run_to_completion)
3821 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3822 rv = handle_one_recv_msg(intf, smi_msg);
3823 if (!run_to_completion)
3824 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3825 if (rv == 0) {
3826 /* Message handled */
3827 ipmi_free_smi_msg(smi_msg);
3828 } else if (rv < 0) {
3829 /* Fatal error on the message, del but don't free. */
3830 } else {
3831 /*
3832 * To preserve message order, quit if we
3833 * can't handle a message.
3834 */
3835 list_add(&smi_msg->link, &intf->waiting_msgs);
3836 break;
3837 }
3838 }
3839 if (!run_to_completion)
3840 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3841
3842 /*
3843 * If the pretimeout count is non-zero, decrement one from it and
3844 * deliver pretimeouts to all the users.
3845 */
3846 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
3847 ipmi_user_t user;
3848
3849 rcu_read_lock();
3850 list_for_each_entry_rcu(user, &intf->users, link) {
3851 if (user->handler->ipmi_watchdog_pretimeout)
3852 user->handler->ipmi_watchdog_pretimeout(
3853 user->handler_data);
3854 }
3855 rcu_read_unlock();
3856 }
3857}
3858
3859static void smi_recv_tasklet(unsigned long val)
3860{
3861 handle_new_recv_msgs((ipmi_smi_t) val);
3862}
3863
3786/* Handle a new message from the lower layer. */ 3864/* Handle a new message from the lower layer. */
3787void ipmi_smi_msg_received(ipmi_smi_t intf, 3865void ipmi_smi_msg_received(ipmi_smi_t intf,
3788 struct ipmi_smi_msg *msg) 3866 struct ipmi_smi_msg *msg)
3789{ 3867{
3790 unsigned long flags = 0; /* keep us warning-free. */ 3868 unsigned long flags = 0; /* keep us warning-free. */
3791 int rv;
3792 int run_to_completion; 3869 int run_to_completion;
3793 3870
3794 3871
@@ -3842,31 +3919,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
3842 run_to_completion = intf->run_to_completion; 3919 run_to_completion = intf->run_to_completion;
3843 if (!run_to_completion) 3920 if (!run_to_completion)
3844 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3921 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3845 if (!list_empty(&intf->waiting_msgs)) { 3922 list_add_tail(&msg->link, &intf->waiting_msgs);
3846 list_add_tail(&msg->link, &intf->waiting_msgs);
3847 if (!run_to_completion)
3848 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3849 goto out;
3850 }
3851 if (!run_to_completion) 3923 if (!run_to_completion)
3852 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3924 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3853 3925
3854 rv = handle_new_recv_msg(intf, msg); 3926 tasklet_schedule(&intf->recv_tasklet);
3855 if (rv > 0) {
3856 /*
3857 * Could not handle the message now, just add it to a
3858 * list to handle later.
3859 */
3860 run_to_completion = intf->run_to_completion;
3861 if (!run_to_completion)
3862 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3863 list_add_tail(&msg->link, &intf->waiting_msgs);
3864 if (!run_to_completion)
3865 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3866 } else if (rv == 0) {
3867 ipmi_free_smi_msg(msg);
3868 }
3869
3870 out: 3927 out:
3871 return; 3928 return;
3872} 3929}
@@ -3874,16 +3931,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
3874 3931
3875void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3932void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3876{ 3933{
3877 ipmi_user_t user; 3934 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
3878 3935 tasklet_schedule(&intf->recv_tasklet);
3879 rcu_read_lock();
3880 list_for_each_entry_rcu(user, &intf->users, link) {
3881 if (!user->handler->ipmi_watchdog_pretimeout)
3882 continue;
3883
3884 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3885 }
3886 rcu_read_unlock();
3887} 3936}
3888EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 3937EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3889 3938
@@ -3997,28 +4046,12 @@ static void ipmi_timeout_handler(long timeout_period)
3997 ipmi_smi_t intf; 4046 ipmi_smi_t intf;
3998 struct list_head timeouts; 4047 struct list_head timeouts;
3999 struct ipmi_recv_msg *msg, *msg2; 4048 struct ipmi_recv_msg *msg, *msg2;
4000 struct ipmi_smi_msg *smi_msg, *smi_msg2;
4001 unsigned long flags; 4049 unsigned long flags;
4002 int i; 4050 int i;
4003 4051
4004 rcu_read_lock(); 4052 rcu_read_lock();
4005 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4053 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4006 /* See if any waiting messages need to be processed. */ 4054 tasklet_schedule(&intf->recv_tasklet);
4007 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
4008 list_for_each_entry_safe(smi_msg, smi_msg2,
4009 &intf->waiting_msgs, link) {
4010 if (!handle_new_recv_msg(intf, smi_msg)) {
4011 list_del(&smi_msg->link);
4012 ipmi_free_smi_msg(smi_msg);
4013 } else {
4014 /*
4015 * To preserve message order, quit if we
4016 * can't handle a message.
4017 */
4018 break;
4019 }
4020 }
4021 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
4022 4055
4023 /* 4056 /*
4024 * Go through the seq table and find any messages that 4057 * Go through the seq table and find any messages that
@@ -4172,12 +4205,48 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
4172 4205
4173#ifdef CONFIG_IPMI_PANIC_EVENT 4206#ifdef CONFIG_IPMI_PANIC_EVENT
4174 4207
4208static atomic_t panic_done_count = ATOMIC_INIT(0);
4209
4175static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 4210static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4176{ 4211{
4212 atomic_dec(&panic_done_count);
4177} 4213}
4178 4214
4179static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 4215static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4180{ 4216{
4217 atomic_dec(&panic_done_count);
4218}
4219
4220/*
4221 * Inside a panic, send a message and wait for a response.
4222 */
4223static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
4224 struct ipmi_addr *addr,
4225 struct kernel_ipmi_msg *msg)
4226{
4227 struct ipmi_smi_msg smi_msg;
4228 struct ipmi_recv_msg recv_msg;
4229 int rv;
4230
4231 smi_msg.done = dummy_smi_done_handler;
4232 recv_msg.done = dummy_recv_done_handler;
4233 atomic_add(2, &panic_done_count);
4234 rv = i_ipmi_request(NULL,
4235 intf,
4236 addr,
4237 0,
4238 msg,
4239 intf,
4240 &smi_msg,
4241 &recv_msg,
4242 0,
4243 intf->channels[0].address,
4244 intf->channels[0].lun,
4245 0, 1); /* Don't retry, and don't wait. */
4246 if (rv)
4247 atomic_sub(2, &panic_done_count);
4248 while (atomic_read(&panic_done_count) != 0)
4249 ipmi_poll(intf);
4181} 4250}
4182 4251
4183#ifdef CONFIG_IPMI_PANIC_STRING 4252#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4216,8 +4285,6 @@ static void send_panic_events(char *str)
4216 unsigned char data[16]; 4285 unsigned char data[16];
4217 struct ipmi_system_interface_addr *si; 4286 struct ipmi_system_interface_addr *si;
4218 struct ipmi_addr addr; 4287 struct ipmi_addr addr;
4219 struct ipmi_smi_msg smi_msg;
4220 struct ipmi_recv_msg recv_msg;
4221 4288
4222 si = (struct ipmi_system_interface_addr *) &addr; 4289 si = (struct ipmi_system_interface_addr *) &addr;
4223 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4290 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4245,9 +4312,6 @@ static void send_panic_events(char *str)
4245 data[7] = str[2]; 4312 data[7] = str[2];
4246 } 4313 }
4247 4314
4248 smi_msg.done = dummy_smi_done_handler;
4249 recv_msg.done = dummy_recv_done_handler;
4250
4251 /* For every registered interface, send the event. */ 4315 /* For every registered interface, send the event. */
4252 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4316 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4253 if (!intf->handlers) 4317 if (!intf->handlers)
@@ -4257,18 +4321,7 @@ static void send_panic_events(char *str)
4257 intf->run_to_completion = 1; 4321 intf->run_to_completion = 1;
4258 /* Send the event announcing the panic. */ 4322 /* Send the event announcing the panic. */
4259 intf->handlers->set_run_to_completion(intf->send_info, 1); 4323 intf->handlers->set_run_to_completion(intf->send_info, 1);
4260 i_ipmi_request(NULL, 4324 ipmi_panic_request_and_wait(intf, &addr, &msg);
4261 intf,
4262 &addr,
4263 0,
4264 &msg,
4265 intf,
4266 &smi_msg,
4267 &recv_msg,
4268 0,
4269 intf->channels[0].address,
4270 intf->channels[0].lun,
4271 0, 1); /* Don't retry, and don't wait. */
4272 } 4325 }
4273 4326
4274#ifdef CONFIG_IPMI_PANIC_STRING 4327#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4316,18 +4369,7 @@ static void send_panic_events(char *str)
4316 msg.data = NULL; 4369 msg.data = NULL;
4317 msg.data_len = 0; 4370 msg.data_len = 0;
4318 intf->null_user_handler = device_id_fetcher; 4371 intf->null_user_handler = device_id_fetcher;
4319 i_ipmi_request(NULL, 4372 ipmi_panic_request_and_wait(intf, &addr, &msg);
4320 intf,
4321 &addr,
4322 0,
4323 &msg,
4324 intf,
4325 &smi_msg,
4326 &recv_msg,
4327 0,
4328 intf->channels[0].address,
4329 intf->channels[0].lun,
4330 0, 1); /* Don't retry, and don't wait. */
4331 4373
4332 if (intf->local_event_generator) { 4374 if (intf->local_event_generator) {
4333 /* Request the event receiver from the local MC. */ 4375 /* Request the event receiver from the local MC. */
@@ -4336,18 +4378,7 @@ static void send_panic_events(char *str)
4336 msg.data = NULL; 4378 msg.data = NULL;
4337 msg.data_len = 0; 4379 msg.data_len = 0;
4338 intf->null_user_handler = event_receiver_fetcher; 4380 intf->null_user_handler = event_receiver_fetcher;
4339 i_ipmi_request(NULL, 4381 ipmi_panic_request_and_wait(intf, &addr, &msg);
4340 intf,
4341 &addr,
4342 0,
4343 &msg,
4344 intf,
4345 &smi_msg,
4346 &recv_msg,
4347 0,
4348 intf->channels[0].address,
4349 intf->channels[0].lun,
4350 0, 1); /* no retry, and no wait. */
4351 } 4382 }
4352 intf->null_user_handler = NULL; 4383 intf->null_user_handler = NULL;
4353 4384
@@ -4404,18 +4435,7 @@ static void send_panic_events(char *str)
4404 strncpy(data+5, p, 11); 4435 strncpy(data+5, p, 11);
4405 p += size; 4436 p += size;
4406 4437
4407 i_ipmi_request(NULL, 4438 ipmi_panic_request_and_wait(intf, &addr, &msg);
4408 intf,
4409 &addr,
4410 0,
4411 &msg,
4412 intf,
4413 &smi_msg,
4414 &recv_msg,
4415 0,
4416 intf->channels[0].address,
4417 intf->channels[0].lun,
4418 0, 1); /* no retry, and no wait. */
4419 } 4439 }
4420 } 4440 }
4421#endif /* CONFIG_IPMI_PANIC_STRING */ 4441#endif /* CONFIG_IPMI_PANIC_STRING */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f9fdc114b31d..1e638fff40ea 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -170,7 +170,6 @@ struct smi_info {
170 struct si_sm_handlers *handlers; 170 struct si_sm_handlers *handlers;
171 enum si_type si_type; 171 enum si_type si_type;
172 spinlock_t si_lock; 172 spinlock_t si_lock;
173 spinlock_t msg_lock;
174 struct list_head xmit_msgs; 173 struct list_head xmit_msgs;
175 struct list_head hp_xmit_msgs; 174 struct list_head hp_xmit_msgs;
176 struct ipmi_smi_msg *curr_msg; 175 struct ipmi_smi_msg *curr_msg;
@@ -319,16 +318,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
319static void deliver_recv_msg(struct smi_info *smi_info, 318static void deliver_recv_msg(struct smi_info *smi_info,
320 struct ipmi_smi_msg *msg) 319 struct ipmi_smi_msg *msg)
321{ 320{
322 /* Deliver the message to the upper layer with the lock 321 /* Deliver the message to the upper layer. */
323 released. */ 322 ipmi_smi_msg_received(smi_info->intf, msg);
324
325 if (smi_info->run_to_completion) {
326 ipmi_smi_msg_received(smi_info->intf, msg);
327 } else {
328 spin_unlock(&(smi_info->si_lock));
329 ipmi_smi_msg_received(smi_info->intf, msg);
330 spin_lock(&(smi_info->si_lock));
331 }
332} 323}
333 324
334static void return_hosed_msg(struct smi_info *smi_info, int cCode) 325static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -357,13 +348,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
357 struct timeval t; 348 struct timeval t;
358#endif 349#endif
359 350
360 /*
361 * No need to save flags, we aleady have interrupts off and we
362 * already hold the SMI lock.
363 */
364 if (!smi_info->run_to_completion)
365 spin_lock(&(smi_info->msg_lock));
366
367 /* Pick the high priority queue first. */ 351 /* Pick the high priority queue first. */
368 if (!list_empty(&(smi_info->hp_xmit_msgs))) { 352 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
369 entry = smi_info->hp_xmit_msgs.next; 353 entry = smi_info->hp_xmit_msgs.next;
@@ -401,9 +385,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
401 rv = SI_SM_CALL_WITHOUT_DELAY; 385 rv = SI_SM_CALL_WITHOUT_DELAY;
402 } 386 }
403 out: 387 out:
404 if (!smi_info->run_to_completion)
405 spin_unlock(&(smi_info->msg_lock));
406
407 return rv; 388 return rv;
408} 389}
409 390
@@ -480,9 +461,7 @@ static void handle_flags(struct smi_info *smi_info)
480 461
481 start_clear_flags(smi_info); 462 start_clear_flags(smi_info);
482 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 463 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
483 spin_unlock(&(smi_info->si_lock));
484 ipmi_smi_watchdog_pretimeout(smi_info->intf); 464 ipmi_smi_watchdog_pretimeout(smi_info->intf);
485 spin_lock(&(smi_info->si_lock));
486 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 465 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
487 /* Messages available. */ 466 /* Messages available. */
488 smi_info->curr_msg = ipmi_alloc_smi_msg(); 467 smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -888,19 +867,6 @@ static void sender(void *send_info,
888 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 867 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
889#endif 868#endif
890 869
891 /*
892 * last_timeout_jiffies is updated here to avoid
893 * smi_timeout() handler passing very large time_diff
894 * value to smi_event_handler() that causes
895 * the send command to abort.
896 */
897 smi_info->last_timeout_jiffies = jiffies;
898
899 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
900
901 if (smi_info->thread)
902 wake_up_process(smi_info->thread);
903
904 if (smi_info->run_to_completion) { 870 if (smi_info->run_to_completion) {
905 /* 871 /*
906 * If we are running to completion, then throw it in 872 * If we are running to completion, then throw it in
@@ -923,16 +889,29 @@ static void sender(void *send_info,
923 return; 889 return;
924 } 890 }
925 891
926 spin_lock_irqsave(&smi_info->msg_lock, flags); 892 spin_lock_irqsave(&smi_info->si_lock, flags);
927 if (priority > 0) 893 if (priority > 0)
928 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); 894 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
929 else 895 else
930 list_add_tail(&msg->link, &smi_info->xmit_msgs); 896 list_add_tail(&msg->link, &smi_info->xmit_msgs);
931 spin_unlock_irqrestore(&smi_info->msg_lock, flags);
932 897
933 spin_lock_irqsave(&smi_info->si_lock, flags); 898 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
934 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) 899 /*
900 * last_timeout_jiffies is updated here to avoid
901 * smi_timeout() handler passing very large time_diff
902 * value to smi_event_handler() that causes
903 * the send command to abort.
904 */
905 smi_info->last_timeout_jiffies = jiffies;
906
907 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
908
909 if (smi_info->thread)
910 wake_up_process(smi_info->thread);
911
935 start_next_msg(smi_info); 912 start_next_msg(smi_info);
913 smi_event_handler(smi_info, 0);
914 }
936 spin_unlock_irqrestore(&smi_info->si_lock, flags); 915 spin_unlock_irqrestore(&smi_info->si_lock, flags);
937} 916}
938 917
@@ -1033,16 +1012,19 @@ static int ipmi_thread(void *data)
1033static void poll(void *send_info) 1012static void poll(void *send_info)
1034{ 1013{
1035 struct smi_info *smi_info = send_info; 1014 struct smi_info *smi_info = send_info;
1036 unsigned long flags; 1015 unsigned long flags = 0;
1016 int run_to_completion = smi_info->run_to_completion;
1037 1017
1038 /* 1018 /*
1039 * Make sure there is some delay in the poll loop so we can 1019 * Make sure there is some delay in the poll loop so we can
1040 * drive time forward and timeout things. 1020 * drive time forward and timeout things.
1041 */ 1021 */
1042 udelay(10); 1022 udelay(10);
1043 spin_lock_irqsave(&smi_info->si_lock, flags); 1023 if (!run_to_completion)
1024 spin_lock_irqsave(&smi_info->si_lock, flags);
1044 smi_event_handler(smi_info, 10); 1025 smi_event_handler(smi_info, 10);
1045 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1026 if (!run_to_completion)
1027 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1046} 1028}
1047 1029
1048static void request_events(void *send_info) 1030static void request_events(void *send_info)
@@ -1679,10 +1661,8 @@ static struct smi_info *smi_info_alloc(void)
1679{ 1661{
1680 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); 1662 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1681 1663
1682 if (info) { 1664 if (info)
1683 spin_lock_init(&info->si_lock); 1665 spin_lock_init(&info->si_lock);
1684 spin_lock_init(&info->msg_lock);
1685 }
1686 return info; 1666 return info;
1687} 1667}
1688 1668
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 020a6aec2d86..7ed356e52035 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -520,6 +520,7 @@ static void panic_halt_ipmi_heartbeat(void)
520 msg.cmd = IPMI_WDOG_RESET_TIMER; 520 msg.cmd = IPMI_WDOG_RESET_TIMER;
521 msg.data = NULL; 521 msg.data = NULL;
522 msg.data_len = 0; 522 msg.data_len = 0;
523 atomic_add(2, &panic_done_count);
523 rv = ipmi_request_supply_msgs(watchdog_user, 524 rv = ipmi_request_supply_msgs(watchdog_user,
524 (struct ipmi_addr *) &addr, 525 (struct ipmi_addr *) &addr,
525 0, 526 0,
@@ -528,8 +529,8 @@ static void panic_halt_ipmi_heartbeat(void)
528 &panic_halt_heartbeat_smi_msg, 529 &panic_halt_heartbeat_smi_msg,
529 &panic_halt_heartbeat_recv_msg, 530 &panic_halt_heartbeat_recv_msg,
530 1); 531 1);
531 if (!rv) 532 if (rv)
532 atomic_add(2, &panic_done_count); 533 atomic_sub(2, &panic_done_count);
533} 534}
534 535
535static struct ipmi_smi_msg panic_halt_smi_msg = { 536static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -553,16 +554,18 @@ static void panic_halt_ipmi_set_timeout(void)
553 /* Wait for the messages to be free. */ 554 /* Wait for the messages to be free. */
554 while (atomic_read(&panic_done_count) != 0) 555 while (atomic_read(&panic_done_count) != 0)
555 ipmi_poll_interface(watchdog_user); 556 ipmi_poll_interface(watchdog_user);
557 atomic_add(2, &panic_done_count);
556 rv = i_ipmi_set_timeout(&panic_halt_smi_msg, 558 rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
557 &panic_halt_recv_msg, 559 &panic_halt_recv_msg,
558 &send_heartbeat_now); 560 &send_heartbeat_now);
559 if (!rv) { 561 if (rv) {
560 atomic_add(2, &panic_done_count); 562 atomic_sub(2, &panic_done_count);
561 if (send_heartbeat_now)
562 panic_halt_ipmi_heartbeat();
563 } else
564 printk(KERN_WARNING PFX 563 printk(KERN_WARNING PFX
565 "Unable to extend the watchdog timeout."); 564 "Unable to extend the watchdog timeout.");
565 } else {
566 if (send_heartbeat_now)
567 panic_halt_ipmi_heartbeat();
568 }
566 while (atomic_read(&panic_done_count) != 0) 569 while (atomic_read(&panic_done_count) != 0)
567 ipmi_poll_interface(watchdog_user); 570 ipmi_poll_interface(watchdog_user);
568} 571}
@@ -1164,7 +1167,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
1164 if (code == SYS_POWER_OFF || code == SYS_HALT) { 1167 if (code == SYS_POWER_OFF || code == SYS_HALT) {
1165 /* Disable the WDT if we are shutting down. */ 1168 /* Disable the WDT if we are shutting down. */
1166 ipmi_watchdog_state = WDOG_TIMEOUT_NONE; 1169 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
1167 panic_halt_ipmi_set_timeout(); 1170 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
1168 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { 1171 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
1169 /* Set a long timer to let the reboot happen, but 1172 /* Set a long timer to let the reboot happen, but
1170 reboot if it hangs, but only if the watchdog 1173 reboot if it hangs, but only if the watchdog
@@ -1172,7 +1175,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
1172 timeout = 120; 1175 timeout = 120;
1173 pretimeout = 0; 1176 pretimeout = 0;
1174 ipmi_watchdog_state = WDOG_TIMEOUT_RESET; 1177 ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
1175 panic_halt_ipmi_set_timeout(); 1178 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
1176 } 1179 }
1177 } 1180 }
1178 return NOTIFY_OK; 1181 return NOTIFY_OK;