author	Corey Minyard <cminyard@mvista.com>	2015-09-05 18:44:13 -0400
committer	Corey Minyard <cminyard@mvista.com>	2015-11-15 22:08:26 -0500
commit	0cfec916e86d881e209de4b4ae9959a6271e6660 (patch)
tree	21a292cfb92d2f5ad90daa5e0459bf9668c8a2b2 /drivers/char
parent	8005c49d9aea74d382f474ce11afbbc7d7130bec (diff)
ipmi: Start the timer and thread on internal msgs
The timer and thread were not being started for internal messages, so
in interrupt mode, if something hung, the timer would never go off and
clean things up.  Factor out the internal message sending and start the
timer for those messages, too.

Signed-off-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Gouji, Masayuki <gouji.masayuki@jp.fujitsu.com>
Cc: stable@vger.kernel.org
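For illustration only, here is a minimal userspace sketch of the pattern the
patch adopts: every message start is routed through one helper that also
re-arms the timeout and wakes the worker thread, so an internal message can no
longer sit pending with no timer running. The struct and helpers below are
invented stand-ins, not the driver's real types; only their shape mirrors the
start_new_msg()/smi_mod_timer() helpers added in the diff below.

	/*
	 * Minimal userspace sketch (assumed stand-in types, not the driver's) of
	 * the pattern the patch adopts: arm the timer and wake the worker on
	 * every message start, internal ones included.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	#define SI_TIMEOUT_SECS 1

	struct fake_smi {                        /* stand-in for struct smi_info */
		time_t        timer_deadline;    /* stands in for si_timer */
		bool          timer_running;
		bool          thread_needs_wake; /* stands in for wake_up_process() */
		unsigned char cur_msg[8];
		unsigned int  cur_size;
	};

	static void smi_mod_timer(struct fake_smi *smi, time_t deadline)
	{
		smi->timer_deadline = deadline;
		smi->timer_running  = true;
	}

	/* Mirrors the new start_new_msg(): arm timer, wake thread, then send. */
	static void start_new_msg(struct fake_smi *smi, const unsigned char *msg,
				  unsigned int size)
	{
		smi_mod_timer(smi, time(NULL) + SI_TIMEOUT_SECS);
		smi->thread_needs_wake = true;

		memcpy(smi->cur_msg, msg, size); /* stands in for start_transaction() */
		smi->cur_size = size;
	}

	int main(void)
	{
		struct fake_smi smi = { 0 };
		/* 0x18 = IPMI_NETFN_APP_REQUEST << 2; 0x31 = get-message-flags cmd */
		unsigned char get_flags[2] = { 0x18, 0x31 };

		/* An "internal" message now arms the timer exactly like any other. */
		start_new_msg(&smi, get_flags, sizeof(get_flags));
		printf("timer running: %d, thread woken: %d\n",
		       smi.timer_running, smi.thread_needs_wake);
		return 0;
	}

With every send path funneled through the helper, a hung BMC always has a
timeout pending, which is the behavior interrupt mode was missing.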
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/ipmi/ipmi_si_intf.c	73
1 file changed, 44 insertions(+), 29 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 654f6f36a071..20c3d7b97602 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 	return rv;
 }
 
-static void start_check_enables(struct smi_info *smi_info)
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+	smi_info->last_timeout_jiffies = jiffies;
+	mod_timer(&smi_info->si_timer, new_val);
+	smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+			  unsigned int size)
+{
+	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+	if (smi_info->thread)
+		wake_up_process(smi_info->thread);
+
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info, bool start_timer)
 {
 	unsigned char msg[2];
 
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+	if (start_timer)
+		start_new_msg(smi_info, msg, 2);
+	else
+		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 	smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info)
+static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 {
 	unsigned char msg[3];
 
@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
 	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 	msg[2] = WDT_PRE_TIMEOUT_INT;
 
-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+	if (start_timer)
+		start_new_msg(smi_info, msg, 3);
+	else
+		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
 	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
 	smi_info->curr_msg->data_size = 2;
 
-	smi_info->handlers->start_transaction(
-		smi_info->si_sm,
-		smi_info->curr_msg->data,
-		smi_info->curr_msg->data_size);
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
 	smi_info->si_state = SI_GETTING_MESSAGES;
 }
 
@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
 	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
 	smi_info->curr_msg->data_size = 2;
 
-	smi_info->handlers->start_transaction(
-		smi_info->si_sm,
-		smi_info->curr_msg->data,
-		smi_info->curr_msg->data_size);
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
 	smi_info->si_state = SI_GETTING_EVENTS;
 }
 
-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
-{
-	smi_info->last_timeout_jiffies = jiffies;
-	mod_timer(&smi_info->si_timer, new_val);
-	smi_info->timer_running = true;
-}
-
 /*
  * When we have a situtaion where we run out of memory and cannot
  * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = true;
-		start_check_enables(smi_info);
+		start_check_enables(smi_info, start_timer);
 		return true;
 	}
 	return false;
@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = false;
-		start_check_enables(smi_info);
+		start_check_enables(smi_info, true);
 		return true;
 	}
 	return false;
@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
-		if (!disable_si_irq(smi_info))
+		if (!disable_si_irq(smi_info, true))
 			smi_info->si_state = SI_NORMAL;
 	} else if (enable_si_irq(smi_info)) {
 		ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
 		/* Watchdog pre-timeout */
 		smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-		start_clear_flags(smi_info);
+		start_clear_flags(smi_info, true);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 		if (smi_info->intf)
 			ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 		msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
-		smi_info->handlers->start_transaction(
-			smi_info->si_sm, msg, 2);
+		start_new_msg(smi_info, msg, 2);
 		smi_info->si_state = SI_GETTING_FLAGS;
 		goto restart;
 	}
@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 	 * disable and messages disabled.
 	 */
 	if (smi_info->supports_event_msg_buff || smi_info->irq) {
-		start_check_enables(smi_info);
+		start_check_enables(smi_info, true);
 	} else {
 		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 		if (!smi_info->curr_msg)
@@ -3613,7 +3628,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	 * Start clearing the flags before we enable interrupts or the
 	 * timer to avoid racing with the timer.
 	 */
-	start_clear_flags(new_smi);
+	start_clear_flags(new_smi, false);
 
 	/*
 	 * IRQ is defined to be set when non-zero. req_events will
@@ -3908,7 +3923,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
 	}
-	disable_si_irq(to_clean);
+	disable_si_irq(to_clean, false);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);