author     Corey Minyard <cminyard@mvista.com>    2014-11-19 05:03:26 -0500
committer  Corey Minyard <cminyard@mvista.com>    2014-12-11 16:04:13 -0500
commit     d9b7e4f717a167610a49ceb9e5969e80146c89a8 (patch)
tree       73566796f0175c0de07ec91959418bb270e59f9d  /drivers/char
parent     6a11e5c67a397e9a64cfde6961c83a7a64d7980c (diff)
ipmi: Periodically check to see if irqs and messages are set right
The BMC can be reset while we are running; that means the interrupt
and event message buffer settings may be wrong. So periodically
check these settings and fix them if they are wrong.
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Tony Rex <tony.rex@ericsson.com>
Tested-by: Magnus Johansson E <magnus.e.johansson@ericsson.com>
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 197
1 file changed, 101 insertions(+), 96 deletions(-)
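Before the patch itself, here is a condensed, standalone sketch of the check this commit introduces: compute the global-enable bits the driver wants, compare them with what the BMC reports, and rewrite them if they differ. The bit values, struct layout, and names below are simplified stand-ins for illustration, not the real definitions from ipmi_msgdefs.h or ipmi_si_intf.c.

/*
 * Standalone sketch of the enable-check logic this patch adds.  The bit
 * values below are illustrative placeholders, not the kernel's real ones.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IPMI_BMC_RCV_MSG_INTR  0x01    /* placeholder bit values */
#define IPMI_BMC_EVT_MSG_BUFF  0x02
#define IPMI_BMC_EVT_MSG_INTR  0x08

/* Global enables we care about (mirrors GLOBAL_ENABLES_MASK in the patch). */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
                             IPMI_BMC_EVT_MSG_INTR)

struct smi_state {                     /* reduced stand-in for struct smi_info */
	bool supports_event_msg_buff;
	bool have_irq;
	bool interrupt_disabled;
};

/* What the driver wants the BMC's global enables to look like right now. */
static uint8_t wanted_global_enables(const struct smi_state *s)
{
	uint8_t enables = 0;

	if (s->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;
	if (s->have_irq && !s->interrupt_disabled)
		enables |= IPMI_BMC_RCV_MSG_INTR;
	if (s->supports_event_msg_buff && s->have_irq && !s->interrupt_disabled)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	return enables;
}

int main(void)
{
	struct smi_state s = {
		.supports_event_msg_buff = true,
		.have_irq = true,
		.interrupt_disabled = false,
	};
	/*
	 * Pretend this byte came back from IPMI_GET_BMC_GLOBAL_ENABLES_CMD
	 * after the BMC was reset: everything we care about is cleared.
	 */
	uint8_t bmc_enables = 0x00;
	uint8_t want = wanted_global_enables(&s);

	if (want != (bmc_enables & GLOBAL_ENABLES_MASK)) {
		/*
		 * The patch would now send IPMI_SET_BMC_GLOBAL_ENABLES_CMD
		 * with the wanted bits, preserving bits outside the mask.
		 */
		uint8_t fixed = want | (bmc_enables & ~GLOBAL_ENABLES_MASK);
		printf("enables wrong (0x%02x), rewriting to 0x%02x\n",
		       bmc_enables, fixed);
	} else {
		printf("enables already correct (0x%02x)\n", bmc_enables);
	}
	return 0;
}

In the real driver this comparison runs from the SI state machine (the new SI_CHECKING_ENABLES and SI_SETTING_ENABLES states) whenever events are requested, as the diff below shows.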
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4f11301a0c42..2952d2dcc855 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -92,12 +92,9 @@ enum si_intf_state {
 	SI_GETTING_FLAGS,
 	SI_GETTING_EVENTS,
 	SI_CLEARING_FLAGS,
-	SI_CLEARING_FLAGS_THEN_SET_IRQ,
 	SI_GETTING_MESSAGES,
-	SI_ENABLE_INTERRUPTS1,
-	SI_ENABLE_INTERRUPTS2,
-	SI_DISABLE_INTERRUPTS1,
-	SI_DISABLE_INTERRUPTS2
+	SI_CHECKING_ENABLES,
+	SI_SETTING_ENABLES
 	/* FIXME - add watchdog stuff. */
 };
 
@@ -260,6 +257,11 @@ struct smi_info {
 	 */
 	bool		    interrupt_disabled;
 
+	/*
+	 * Does the BMC support events?
+	 */
+	bool		    supports_event_msg_buff;
+
 	/* From the get device id response... */
 	struct ipmi_device_id device_id;
 
@@ -386,30 +388,15 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 	return rv;
 }
 
-static void start_enable_irq(struct smi_info *smi_info)
+static void start_check_enables(struct smi_info *smi_info)
 {
 	unsigned char msg[2];
 
-	/*
-	 * If we are enabling interrupts, we have to tell the
-	 * BMC to use them.
-	 */
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
 	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
-	smi_info->si_state = SI_ENABLE_INTERRUPTS1;
-}
-
-static void start_disable_irq(struct smi_info *smi_info)
-{
-	unsigned char msg[2];
-
-	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
-	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
-
-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
-	smi_info->si_state = SI_DISABLE_INTERRUPTS1;
+	smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
 static void start_clear_flags(struct smi_info *smi_info)
@@ -467,8 +454,8 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 static inline bool disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
-		start_disable_irq(smi_info);
 		smi_info->interrupt_disabled = true;
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -477,8 +464,8 @@ static inline bool disable_si_irq(struct smi_info *smi_info)
 static inline bool enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
-		start_enable_irq(smi_info);
 		smi_info->interrupt_disabled = false;
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -538,6 +525,36 @@ static void handle_flags(struct smi_info *smi_info)
 	smi_info->si_state = SI_NORMAL;
 }
 
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+			     IPMI_BMC_EVT_MSG_INTR)
+
+static u8 current_global_enables(struct smi_info *smi_info, u8 base)
+{
+	u8 enables = 0;
+
+	if (smi_info->supports_event_msg_buff)
+		enables |= IPMI_BMC_EVT_MSG_BUFF;
+	else
+		enables &= ~IPMI_BMC_EVT_MSG_BUFF;
+
+	if (smi_info->irq && !smi_info->interrupt_disabled)
+		enables |= IPMI_BMC_RCV_MSG_INTR;
+	else
+		enables &= ~IPMI_BMC_RCV_MSG_INTR;
+
+	if (smi_info->supports_event_msg_buff &&
+	    smi_info->irq && !smi_info->interrupt_disabled)
+
+		enables |= IPMI_BMC_EVT_MSG_INTR;
+	else
+		enables &= ~IPMI_BMC_EVT_MSG_INTR;
+
+	return enables;
+}
+
 static void handle_transaction_done(struct smi_info *smi_info)
 {
 	struct ipmi_smi_msg *msg;
@@ -592,7 +609,6 @@ static void handle_transaction_done(struct smi_info *smi_info)
 	}
 
 	case SI_CLEARING_FLAGS:
-	case SI_CLEARING_FLAGS_THEN_SET_IRQ:
 	{
 		unsigned char msg[3];
 
@@ -603,10 +619,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 			dev_warn(smi_info->dev,
 				 "Error clearing flags: %2.2x\n", msg[2]);
 		}
-		if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
-			start_enable_irq(smi_info);
-		else
-			smi_info->si_state = SI_NORMAL;
+		smi_info->si_state = SI_NORMAL;
 		break;
 	}
 
@@ -686,9 +699,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		break;
 	}
 
-	case SI_ENABLE_INTERRUPTS1:
+	case SI_CHECKING_ENABLES:
 	{
 		unsigned char msg[4];
+		u8 enables;
 
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
@@ -698,72 +712,50 @@ static void handle_transaction_done(struct smi_info *smi_info)
 			dev_warn(smi_info->dev,
 				 "Maybe ok, but ipmi might run very slowly.\n");
 			smi_info->si_state = SI_NORMAL;
-		} else {
+			break;
+		}
+		enables = current_global_enables(smi_info, 0);
+		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+			/* Enables are not correct, fix them. */
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
-			msg[2] = (msg[3] |
-				  IPMI_BMC_RCV_MSG_INTR |
-				  IPMI_BMC_EVT_MSG_INTR);
+			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
 			smi_info->handlers->start_transaction(
 				smi_info->si_sm, msg, 3);
-			smi_info->si_state = SI_ENABLE_INTERRUPTS2;
+			smi_info->si_state = SI_SETTING_ENABLES;
+		} else if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_msg_queue(smi_info);
+		} else {
+			smi_info->si_state = SI_NORMAL;
 		}
 		break;
 	}
 
-	case SI_ENABLE_INTERRUPTS2:
+	case SI_SETTING_ENABLES:
 	{
 		unsigned char msg[4];
 
-		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
+		if (msg[2] != 0)
 			dev_warn(smi_info->dev,
-				 "Couldn't set irq info: %x.\n", msg[2]);
-			dev_warn(smi_info->dev,
-				 "Maybe ok, but ipmi might run very slowly.\n");
-		} else
-			smi_info->interrupt_disabled = false;
-
-		/* We enabled interrupts, flags may be pending. */
-		handle_flags(smi_info);
-		break;
-	}
-
-	case SI_DISABLE_INTERRUPTS1:
-	{
-		unsigned char msg[4];
+				 "Could not set the global enables: 0x%x.\n",
+				 msg[2]);
 
-		/* We got the flags from the SMI, now handle them. */
-		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
-			dev_warn(smi_info->dev, "Could not disable interrupts"
-				 ", failed get.\n");
-			smi_info->si_state = SI_NORMAL;
+		if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_msg_queue(smi_info);
 		} else {
-			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
-			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
-			msg[2] = (msg[3] &
-				  ~(IPMI_BMC_RCV_MSG_INTR |
-				    IPMI_BMC_EVT_MSG_INTR));
-			smi_info->handlers->start_transaction(
-				smi_info->si_sm, msg, 3);
-			smi_info->si_state = SI_DISABLE_INTERRUPTS2;
-		}
-		break;
-	}
-
-	case SI_DISABLE_INTERRUPTS2:
-	{
-		unsigned char msg[4];
-
-		/* We got the flags from the SMI, now handle them. */
-		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
-			dev_warn(smi_info->dev, "Could not disable interrupts"
-				 ", failed set.\n");
+			smi_info->si_state = SI_NORMAL;
 		}
-		smi_info->si_state = SI_NORMAL;
 		break;
 	}
 	}
@@ -859,19 +851,21 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 		 */
 		atomic_set(&smi_info->req_events, 0);
 
-		smi_info->curr_msg = ipmi_alloc_smi_msg();
-		if (!smi_info->curr_msg)
-			goto out;
-
-		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
-		smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
-		smi_info->curr_msg->data_size = 2;
+		/*
+		 * Take this opportunity to check the interrupt and
+		 * message enable state for the BMC.  The BMC can be
+		 * asynchronously reset, and may thus get interrupts
+		 * disable and messages disabled.
+		 */
+		if (smi_info->supports_event_msg_buff || smi_info->irq) {
+			start_check_enables(smi_info);
+		} else {
+			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+			if (!smi_info->curr_msg)
+				goto out;
 
-		smi_info->handlers->start_transaction(
-			smi_info->si_sm,
-			smi_info->curr_msg->data,
-			smi_info->curr_msg->data_size);
-		smi_info->si_state = SI_GETTING_EVENTS;
+			start_getting_events(smi_info);
+		}
 		goto restart;
 	}
  out:
@@ -2918,9 +2912,11 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 		goto out;
 	}
 
-	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
+	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
 		/* buffer is already enabled, nothing to do. */
+		smi_info->supports_event_msg_buff = true;
 		goto out;
+	}
 
 	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
 	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
@@ -2953,6 +2949,9 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 		 * that the event buffer is not supported.
 		 */
 		rv = -ENOENT;
+	else
+		smi_info->supports_event_msg_buff = true;
+
  out:
 	kfree(resp);
 	return rv;
@@ -3392,9 +3391,15 @@ static int try_smi_init(struct smi_info *new_smi)
 	 * timer to avoid racing with the timer.
 	 */
 	start_clear_flags(new_smi);
-	/* IRQ is defined to be set when non-zero. */
-	if (new_smi->irq)
-		new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
+
+	/*
+	 * IRQ is defined to be set when non-zero.  req_events will
+	 * cause a global flags check that will enable interrupts.
+	 */
+	if (new_smi->irq) {
+		new_smi->interrupt_disabled = false;
+		atomic_set(&new_smi->req_events, 1);
+	}
 
 	if (!new_smi->dev) {
 		/*