author    Corey Minyard <cminyard@mvista.com>  2014-11-06 18:01:59 -0500
committer Corey Minyard <cminyard@mvista.com>  2014-12-11 16:04:10 -0500
commit    b874b985c816c74a9bda04082f18db88dcbc808a (patch)
tree      001d8b47ac8e7fbd39573db365d254ece6aa4cad /drivers/char/ipmi
parent    7ea0ed2b5be81781ba976bc03414ef5da76270b9 (diff)
ipmi: Remove the now unnecessary message queue
A message queue was added to the message handler, so the SMI interfaces
only need to handle one message at a time.  Pull out the message queue.
This also leads to some significant simplification in the shutdown of
an interface, since the message handler now does a lot of the cleanup.

Signed-off-by: Corey Minyard <cminyard@mvista.com>
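To see the shape of the change in isolation: the two transmit lists
(xmit_msgs and hp_xmit_msgs) collapse into a single waiting_msg slot that
the message handler fills at most once and the SMI state machine drains.
Below is a minimal user-space sketch of that handoff, assuming simplified
stand-in types; sender_sketch() and start_next_msg_sketch() are hypothetical
reductions of the driver's sender() and start_next_msg(), and assert()
stands in for the kernel's BUG_ON().

#include <assert.h>
#include <stddef.h>

/* Pared-down stand-ins for the driver's struct ipmi_smi_msg and
 * struct smi_info; only the fields involved in the handoff appear. */
struct ipmi_smi_msg { int payload; };

struct smi_info {
	struct ipmi_smi_msg *waiting_msg; /* at most one pending message */
	struct ipmi_smi_msg *curr_msg;    /* message the state machine owns */
};

/* Like sender(): the message handler hands over one message at a time,
 * so a single pointer replaces the old xmit_msgs/hp_xmit_msgs lists. */
static void sender_sketch(struct smi_info *smi, struct ipmi_smi_msg *msg)
{
	assert(smi->waiting_msg == NULL); /* the driver uses BUG_ON() here */
	smi->waiting_msg = msg;
}

/* Like start_next_msg(): claim the pending message; NULL means go idle. */
static void start_next_msg_sketch(struct smi_info *smi)
{
	smi->curr_msg = smi->waiting_msg;
	smi->waiting_msg = NULL;
}

int main(void)
{
	struct smi_info smi = { NULL, NULL };
	struct ipmi_smi_msg msg = { 42 };

	sender_sketch(&smi, &msg);
	start_next_msg_sketch(&smi);
	assert(smi.curr_msg == &msg && smi.waiting_msg == NULL);
	return 0;
}

The message handler's own queue guarantees the slot is free whenever
sender() runs, which is what lets the priority lists and the
stop_operation flag go.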
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c  112
1 file changed, 34 insertions(+), 78 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e36487db0e94..d31a7fce2260 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -170,8 +170,7 @@ struct smi_info {
 	struct si_sm_handlers *handlers;
 	enum si_type si_type;
 	spinlock_t si_lock;
-	struct list_head xmit_msgs;
-	struct list_head hp_xmit_msgs;
+	struct ipmi_smi_msg *waiting_msg;
 	struct ipmi_smi_msg *curr_msg;
 	enum si_intf_state si_state;
 
@@ -250,9 +249,6 @@ struct smi_info {
 	/* The time (in jiffies) the last timeout occurred at. */
 	unsigned long last_timeout_jiffies;
 
-	/* Used to gracefully stop the timer without race conditions. */
-	atomic_t stop_operation;
-
 	/* Are we waiting for the events, pretimeouts, received msgs? */
 	atomic_t need_watch;
 
@@ -355,28 +351,18 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 {
 	int rv;
-	struct list_head *entry = NULL;
 #ifdef DEBUG_TIMING
 	struct timeval t;
 #endif
 
-	/* Pick the high priority queue first. */
-	if (!list_empty(&(smi_info->hp_xmit_msgs))) {
-		entry = smi_info->hp_xmit_msgs.next;
-	} else if (!list_empty(&(smi_info->xmit_msgs))) {
-		entry = smi_info->xmit_msgs.next;
-	}
-
-	if (!entry) {
+	if (!smi_info->waiting_msg) {
 		smi_info->curr_msg = NULL;
 		rv = SI_SM_IDLE;
 	} else {
 		int err;
 
-		list_del(entry);
-		smi_info->curr_msg = list_entry(entry,
-						struct ipmi_smi_msg,
-						link);
+		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->waiting_msg = NULL;
 #ifdef DEBUG_TIMING
 		do_gettimeofday(&t);
 		printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
@@ -916,14 +902,8 @@ static void sender(void *send_info,
 	struct timeval t;
 #endif
 
-	if (atomic_read(&smi_info->stop_operation)) {
-		msg->rsp[0] = msg->data[0] | 4;
-		msg->rsp[1] = msg->data[1];
-		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
-		msg->rsp_size = 3;
-		deliver_recv_msg(smi_info, msg);
-		return;
-	}
+	BUG_ON(smi_info->waiting_msg);
+	smi_info->waiting_msg = msg;
 
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
@@ -932,16 +912,16 @@ static void sender(void *send_info,
 
 	if (smi_info->run_to_completion) {
 		/*
-		 * If we are running to completion, then throw it in
-		 * the list and run transactions until everything is
-		 * clear.  Priority doesn't matter here.
+		 * If we are running to completion, start it and run
+		 * transactions until everything is clear.
 		 */
+		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->waiting_msg = NULL;
 
 		/*
 		 * Run to completion means we are single-threaded, no
 		 * need for locks.
 		 */
-		list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
 
 		result = smi_event_handler(smi_info, 0);
 		while (result != SI_SM_IDLE) {
@@ -953,11 +933,6 @@ static void sender(void *send_info,
 	}
 
 	spin_lock_irqsave(&smi_info->si_lock, flags);
-	if (priority > 0)
-		list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
-	else
-		list_add_tail(&msg->link, &smi_info->xmit_msgs);
-
 	check_start_timer_thread(smi_info);
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
@@ -1095,8 +1070,7 @@ static void request_events(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
 
-	if (atomic_read(&smi_info->stop_operation) ||
-	    !smi_info->has_event_buffer)
+	if (!smi_info->has_event_buffer)
 		return;
 
 	atomic_set(&smi_info->req_events, 1);
@@ -3220,15 +3194,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
 
 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
 {
-	if (smi_info->intf) {
-		/*
-		 * The timer and thread are only running if the
-		 * interface has been started up and registered.
-		 */
-		if (smi_info->thread != NULL)
-			kthread_stop(smi_info->thread);
+	if (smi_info->thread != NULL)
+		kthread_stop(smi_info->thread);
+	if (smi_info->timer_running)
 		del_timer_sync(&smi_info->si_timer);
-	}
 }
 
 static struct ipmi_default_vals
@@ -3403,8 +3372,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	setup_oem_data_handler(new_smi);
 	setup_xaction_handlers(new_smi);
 
-	INIT_LIST_HEAD(&(new_smi->xmit_msgs));
-	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
+	new_smi->waiting_msg = NULL;
 	new_smi->curr_msg = NULL;
 	atomic_set(&new_smi->req_events, 0);
 	new_smi->run_to_completion = false;
@@ -3412,7 +3380,6 @@ static int try_smi_init(struct smi_info *new_smi)
 		atomic_set(&new_smi->stats[i], 0);
 
 	new_smi->interrupt_disabled = true;
-	atomic_set(&new_smi->stop_operation, 0);
 	atomic_set(&new_smi->need_watch, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
@@ -3497,15 +3464,15 @@ static int try_smi_init(struct smi_info *new_smi)
 	return 0;
 
  out_err_stop_timer:
-	atomic_inc(&new_smi->stop_operation);
 	wait_for_timer_and_thread(new_smi);
 
  out_err:
 	new_smi->interrupt_disabled = true;
 
 	if (new_smi->intf) {
-		ipmi_unregister_smi(new_smi->intf);
+		ipmi_smi_t intf = new_smi->intf;
 		new_smi->intf = NULL;
+		ipmi_unregister_smi(intf);
 	}
 
 	if (new_smi->irq_cleanup) {
@@ -3684,60 +3651,49 @@ module_init(init_ipmi_si);
 static void cleanup_one_si(struct smi_info *to_clean)
 {
 	int rv = 0;
-	unsigned long flags;
 
 	if (!to_clean)
 		return;
 
+	if (to_clean->intf) {
+		ipmi_smi_t intf = to_clean->intf;
+
+		to_clean->intf = NULL;
+		rv = ipmi_unregister_smi(intf);
+		if (rv) {
+			pr_err(PFX "Unable to unregister device: errno=%d\n",
+			       rv);
+		}
+	}
+
 	if (to_clean->dev)
 		dev_set_drvdata(to_clean->dev, NULL);
 
 	list_del(&to_clean->link);
 
-	/* Tell the driver that we are shutting down. */
-	atomic_inc(&to_clean->stop_operation);
-
 	/*
-	 * Make sure the timer and thread are stopped and will not run
-	 * again.
+	 * Make sure that interrupts, the timer and the thread are
+	 * stopped and will not run again.
 	 */
+	if (to_clean->irq_cleanup)
+		to_clean->irq_cleanup(to_clean);
 	wait_for_timer_and_thread(to_clean);
 
 	/*
 	 * Timeouts are stopped, now make sure the interrupts are off
-	 * for the device.  A little tricky with locks to make sure
-	 * there are no races.
+	 * in the BMC.  Note that timers and CPU interrupts are off,
+	 * so no need for locks.
 	 */
-	spin_lock_irqsave(&to_clean->si_lock, flags);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
-		spin_unlock_irqrestore(&to_clean->si_lock, flags);
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
-		spin_lock_irqsave(&to_clean->si_lock, flags);
 	}
 	disable_si_irq(to_clean);
-	spin_unlock_irqrestore(&to_clean->si_lock, flags);
-	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
-		poll(to_clean);
-		schedule_timeout_uninterruptible(1);
-	}
-
-	/* Clean up interrupts and make sure that everything is done. */
-	if (to_clean->irq_cleanup)
-		to_clean->irq_cleanup(to_clean);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
 	}
 
-	if (to_clean->intf)
-		rv = ipmi_unregister_smi(to_clean->intf);
-
-	if (rv) {
-		printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
-		       rv);
-	}
-
 	if (to_clean->handlers)
 		to_clean->handlers->cleanup(to_clean->si_sm);
 
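The last hunk is easiest to read as a fixed shutdown order. A toy outline,
assuming stand-in helpers: each function below merely marks where the new
cleanup_one_si() calls the named driver function; none of it is the
driver's real code.

#include <stdio.h>

/* Hypothetical stand-ins, one per step of the new teardown order. */
static void unregister_intf(void)   { puts("ipmi_unregister_smi()"); }
static void irq_cleanup(void)       { puts("irq_cleanup()"); }
static void stop_timer_thread(void) { puts("wait_for_timer_and_thread()"); }
static void drain_until_idle(void)  { puts("poll() until SI_NORMAL"); }
static void disable_bmc_irq(void)   { puts("disable_si_irq()"); }

int main(void)
{
	unregister_intf();   /* handler does its cleanup; no new messages  */
	irq_cleanup();       /* CPU-side interrupts off ...                */
	stop_timer_thread(); /* ... timer and thread too: no locks needed  */
	drain_until_idle();  /* finish any in-flight transaction           */
	disable_bmc_irq();   /* then silence the BMC side                  */
	drain_until_idle();  /* and drain once more                        */
	return 0;
}

Unregistering from the message handler first means the handler has already
done its cleanup and no new messages can arrive, which is why the old
spin_lock_irqsave() juggling in this function could be deleted.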