aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/char/ipmi/ipmi_si_intf.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/char/ipmi/ipmi_si_intf.c')
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c72
1 files changed, 26 insertions, 46 deletions
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f9fdc114b31d..1e638fff40ea 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -170,7 +170,6 @@ struct smi_info {
170 struct si_sm_handlers *handlers; 170 struct si_sm_handlers *handlers;
171 enum si_type si_type; 171 enum si_type si_type;
172 spinlock_t si_lock; 172 spinlock_t si_lock;
173 spinlock_t msg_lock;
174 struct list_head xmit_msgs; 173 struct list_head xmit_msgs;
175 struct list_head hp_xmit_msgs; 174 struct list_head hp_xmit_msgs;
176 struct ipmi_smi_msg *curr_msg; 175 struct ipmi_smi_msg *curr_msg;
@@ -319,16 +318,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
319static void deliver_recv_msg(struct smi_info *smi_info, 318static void deliver_recv_msg(struct smi_info *smi_info,
320 struct ipmi_smi_msg *msg) 319 struct ipmi_smi_msg *msg)
321{ 320{
322 /* Deliver the message to the upper layer with the lock 321 /* Deliver the message to the upper layer. */
323 released. */ 322 ipmi_smi_msg_received(smi_info->intf, msg);
324
325 if (smi_info->run_to_completion) {
326 ipmi_smi_msg_received(smi_info->intf, msg);
327 } else {
328 spin_unlock(&(smi_info->si_lock));
329 ipmi_smi_msg_received(smi_info->intf, msg);
330 spin_lock(&(smi_info->si_lock));
331 }
332} 323}
333 324
334static void return_hosed_msg(struct smi_info *smi_info, int cCode) 325static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -357,13 +348,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
357 struct timeval t; 348 struct timeval t;
358#endif 349#endif
359 350
360 /*
361 * No need to save flags, we already have interrupts off and we
362 * already hold the SMI lock.
363 */
364 if (!smi_info->run_to_completion)
365 spin_lock(&(smi_info->msg_lock));
366
367 /* Pick the high priority queue first. */ 351 /* Pick the high priority queue first. */
368 if (!list_empty(&(smi_info->hp_xmit_msgs))) { 352 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
369 entry = smi_info->hp_xmit_msgs.next; 353 entry = smi_info->hp_xmit_msgs.next;
@@ -401,9 +385,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
401 rv = SI_SM_CALL_WITHOUT_DELAY; 385 rv = SI_SM_CALL_WITHOUT_DELAY;
402 } 386 }
403 out: 387 out:
404 if (!smi_info->run_to_completion)
405 spin_unlock(&(smi_info->msg_lock));
406
407 return rv; 388 return rv;
408} 389}
409 390
@@ -480,9 +461,7 @@ static void handle_flags(struct smi_info *smi_info)
480 461
481 start_clear_flags(smi_info); 462 start_clear_flags(smi_info);
482 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 463 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
483 spin_unlock(&(smi_info->si_lock));
484 ipmi_smi_watchdog_pretimeout(smi_info->intf); 464 ipmi_smi_watchdog_pretimeout(smi_info->intf);
485 spin_lock(&(smi_info->si_lock));
486 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 465 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
487 /* Messages available. */ 466 /* Messages available. */
488 smi_info->curr_msg = ipmi_alloc_smi_msg(); 467 smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -888,19 +867,6 @@ static void sender(void *send_info,
888 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 867 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
889#endif 868#endif
890 869
891 /*
892 * last_timeout_jiffies is updated here to avoid
893 * smi_timeout() handler passing very large time_diff
894 * value to smi_event_handler() that causes
895 * the send command to abort.
896 */
897 smi_info->last_timeout_jiffies = jiffies;
898
899 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
900
901 if (smi_info->thread)
902 wake_up_process(smi_info->thread);
903
904 if (smi_info->run_to_completion) { 870 if (smi_info->run_to_completion) {
905 /* 871 /*
906 * If we are running to completion, then throw it in 872 * If we are running to completion, then throw it in
@@ -923,16 +889,29 @@ static void sender(void *send_info,
923 return; 889 return;
924 } 890 }
925 891
926 spin_lock_irqsave(&smi_info->msg_lock, flags); 892 spin_lock_irqsave(&smi_info->si_lock, flags);
927 if (priority > 0) 893 if (priority > 0)
928 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); 894 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
929 else 895 else
930 list_add_tail(&msg->link, &smi_info->xmit_msgs); 896 list_add_tail(&msg->link, &smi_info->xmit_msgs);
931 spin_unlock_irqrestore(&smi_info->msg_lock, flags);
932 897
933 spin_lock_irqsave(&smi_info->si_lock, flags); 898 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
934 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) 899 /*
900 * last_timeout_jiffies is updated here to avoid
901 * smi_timeout() handler passing very large time_diff
902 * value to smi_event_handler() that causes
903 * the send command to abort.
904 */
905 smi_info->last_timeout_jiffies = jiffies;
906
907 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
908
909 if (smi_info->thread)
910 wake_up_process(smi_info->thread);
911
935 start_next_msg(smi_info); 912 start_next_msg(smi_info);
913 smi_event_handler(smi_info, 0);
914 }
936 spin_unlock_irqrestore(&smi_info->si_lock, flags); 915 spin_unlock_irqrestore(&smi_info->si_lock, flags);
937} 916}
938 917
@@ -1033,16 +1012,19 @@ static int ipmi_thread(void *data)
1033static void poll(void *send_info) 1012static void poll(void *send_info)
1034{ 1013{
1035 struct smi_info *smi_info = send_info; 1014 struct smi_info *smi_info = send_info;
1036 unsigned long flags; 1015 unsigned long flags = 0;
1016 int run_to_completion = smi_info->run_to_completion;
1037 1017
1038 /* 1018 /*
1039 * Make sure there is some delay in the poll loop so we can 1019 * Make sure there is some delay in the poll loop so we can
1040 * drive time forward and timeout things. 1020 * drive time forward and timeout things.
1041 */ 1021 */
1042 udelay(10); 1022 udelay(10);
1043 spin_lock_irqsave(&smi_info->si_lock, flags); 1023 if (!run_to_completion)
1024 spin_lock_irqsave(&smi_info->si_lock, flags);
1044 smi_event_handler(smi_info, 10); 1025 smi_event_handler(smi_info, 10);
1045 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1026 if (!run_to_completion)
1027 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1046} 1028}
1047 1029
1048static void request_events(void *send_info) 1030static void request_events(void *send_info)
@@ -1679,10 +1661,8 @@ static struct smi_info *smi_info_alloc(void)
1679{ 1661{
1680 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); 1662 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1681 1663
1682 if (info) { 1664 if (info)
1683 spin_lock_init(&info->si_lock); 1665 spin_lock_init(&info->si_lock);
1684 spin_lock_init(&info->msg_lock);
1685 }
1686 return info; 1666 return info;
1687} 1667}
1688 1668