author		Krishna Gudipati <kgudipat@brocade.com>	2010-12-13 19:17:11 -0500
committer	James Bottomley <James.Bottomley@suse.de>	2010-12-21 13:37:15 -0500
commit		f1d584d70f31f54e0a559049906f42db89e2746d (patch)
tree		7bfa223d53221c5930802b988a8bb6c0aed201d6 /drivers/scsi
parent		f3a060ca57903daaf2f1a88c6c25832619b2a74f (diff)
[SCSI] bfa: IOC auto recovery fix.
- Made IOC auto_recovery synchronized and not timer based.
- Only one PCI function will attempt to recover and reinitialize
  the ASIC on a failure, after all the active PCI fns acknowledge
  the IOC failure.

Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
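In outline, the handshake works like this: each active PCI function sets a
"sync required" bit when it joins, sets its "ack" bit once it has observed the
failure, and only when every joined function has acked does a single function
go on to reinitialize the ASIC. A minimal, self-contained sketch of the idea
with hypothetical names (the real logic lives in the bfa_ioc_ct_sync_* and
bfa_ioc_cb_sync_* callbacks in the diff below):

	#include <stdbool.h>
	#include <stdint.h>

	/* One shared 32-bit scratch register holds two bitmaps:
	 * upper 16 bits = functions that joined ("reqd"),
	 * lower 16 bits = functions that acked a failure ("ackd"). */
	static uint32_t fail_sync;

	static void sync_join(unsigned int fn)	/* before hw init */
	{
		fail_sync |= (1u << fn) << 16;
	}

	static void sync_ack(unsigned int fn)	/* on seeing the failure */
	{
		fail_sync |= 1u << fn;
	}

	/* Only when all joined functions have acked (or nothing has
	 * failed yet) may the caller reinitialize the ASIC. */
	static bool sync_complete(void)
	{
		uint32_t reqd = fail_sync >> 16;
		uint32_t ackd = fail_sync & 0xffff;

		return ackd == 0 || reqd == ackd;
	}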
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/bfa/bfa_ioc.c	| 288
-rw-r--r--	drivers/scsi/bfa/bfa_ioc.h	|  19
-rw-r--r--	drivers/scsi/bfa/bfa_ioc_cb.c	|  95
-rw-r--r--	drivers/scsi/bfa/bfa_ioc_ct.c	| 101
-rw-r--r--	drivers/scsi/bfa/bfi_cbreg.h	|   1
-rw-r--r--	drivers/scsi/bfa/bfi_ctreg.h	|  41
6 files changed, 445 insertions(+), 100 deletions(-)
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 9173bf20ffba..05b0ff93284a 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -29,7 +29,7 @@ BFA_TRC_FILE(CNA, IOC);
 #define BFA_IOC_TOV		3000	/* msecs */
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
-#define BFA_IOC_HWINIT_MAX	2
+#define BFA_IOC_HWINIT_MAX	5
 #define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
 
 #define bfa_ioc_timer_start(__ioc)	\
@@ -54,17 +54,16 @@ BFA_TRC_FILE(CNA, IOC);
 	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
-#define bfa_ioc_notify_hbfail(__ioc)	\
-	((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
-
-#ifdef BFA_IOC_IS_UEFI
-#define bfa_ioc_is_bios_optrom(__ioc)	(0)
-#define bfa_ioc_is_uefi(__ioc)		BFA_IOC_IS_UEFI
-#else
-#define bfa_ioc_is_bios_optrom(__ioc)	\
-	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
-#define bfa_ioc_is_uefi(__ioc)		(0)
-#endif
+#define bfa_ioc_notify_fail(__ioc)	\
+	((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_join(__ioc)	\
+	((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
+#define bfa_ioc_sync_leave(__ioc)	\
+	((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
+#define bfa_ioc_sync_ack(__ioc)		\
+	((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
+#define bfa_ioc_sync_complete(__ioc)	\
+	((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)	\
 	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -104,10 +103,11 @@ enum ioc_event {
 	IOC_E_ENABLED		= 5,	/*  f/w enabled			*/
 	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
 	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
-	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
-	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
-	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
-	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
+	IOC_E_INITFAILED	= 8,	/*  failure notice by iocpf sm	*/
+	IOC_E_PFFAILED		= 9,	/*  failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 10,	/*  heartbeat failure		*/
+	IOC_E_HWERROR		= 11,	/*  hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 12,	/*  timeout			*/
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -195,9 +195,14 @@ bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
+						enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
+						enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
 
 static struct bfa_sm_table_s iocpf_sm_table[] = {
@@ -208,9 +213,12 @@ static struct bfa_sm_table_s iocpf_sm_table[] = {
 	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
 	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
 	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
 	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
 	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
 	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
 };
 
@@ -497,7 +505,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
 }
 
 /*
- * Hardware initialization failed.
+ * Hardware initialization retry.
  */
 static void
 bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
@@ -519,6 +527,10 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 		break;
 
+	case IOC_E_INITFAILED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		break;
+
 	case IOC_E_ENABLE:
 		break;
 
@@ -561,6 +573,11 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+		break;
+
 	case IOC_E_HWERROR:
 		/*
 		 * HB failure notification, ignore.
@@ -630,8 +647,15 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
-			iocpf->retry_count = 0;
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			if (bfa_ioc_sync_complete(ioc)) {
+				iocpf->retry_count = 0;
+				bfa_ioc_sync_join(ioc);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			} else {
+				bfa_ioc_firmware_unlock(ioc);
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_sem_timer_start(ioc);
+			}
 		} else {
 			writel(1, ioc->ioc_regs.ioc_sem_reg);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
@@ -722,13 +746,18 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
-		iocpf->retry_count = 0;
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		if (bfa_ioc_sync_complete(ioc)) {
+			bfa_ioc_sync_join(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		} else {
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_sem_timer_start(ioc);
+		}
 		break;
 
 	case IOCPF_E_DISABLE:
 		bfa_sem_timer_stop(ioc);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
 	default:
@@ -767,23 +796,16 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	 */
 
 	case IOCPF_E_TIMEOUT:
-		iocpf->retry_count++;
-		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
-			bfa_iocpf_timer_start(ioc);
-			bfa_ioc_hwinit(ioc, BFA_TRUE);
-			break;
-		}
-
 		writel(1, ioc->ioc_regs.ioc_sem_reg);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-
 		if (event == IOCPF_E_TIMEOUT)
 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
 	case IOCPF_E_DISABLE:
-		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		bfa_iocpf_timer_stop(ioc);
+		bfa_ioc_sync_leave(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
@@ -824,18 +846,10 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	 */
 
 	case IOCPF_E_TIMEOUT:
-		iocpf->retry_count++;
-		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
-			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-			break;
-		}
-
 		writel(1, ioc->ioc_regs.ioc_sem_reg);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-
 		if (event == IOCPF_E_TIMEOUT)
 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
 	case IOCPF_E_DISABLE:
@@ -872,20 +886,21 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		break;
 
 	case IOCPF_E_GETATTRFAIL:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
 	case IOCPF_E_FAIL:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
 		break;
 
 	case IOCPF_E_FWREADY:
-		if (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op))
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
-		else
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-
-		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		if (bfa_ioc_is_operational(ioc)) {
+			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+		} else {
+			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		}
 		break;
 
 	default:
@@ -914,7 +929,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	case IOCPF_E_FWRSP_DISABLE:
 	case IOCPF_E_FWREADY:
 		bfa_iocpf_timer_stop(ioc);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
 	case IOCPF_E_FAIL:
@@ -925,7 +940,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	case IOCPF_E_TIMEOUT:
 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
 	case IOCPF_E_FWRSP_ENABLE:
@@ -936,6 +951,37 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	}
 }
 
+static void
+bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * IOC hb ack request is being removed.
+ */
+static void
+bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_sync_leave(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
 /*
  * IOC disable completion entry.
  */
@@ -954,6 +1000,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_ENABLE:
+		iocpf->retry_count = 0;
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
 		break;
 
@@ -968,9 +1015,64 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 }
 
 static void
+bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * @brief
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_notify_fail(ioc);
+		bfa_ioc_sync_ack(ioc);
+		iocpf->retry_count++;
+		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
+			bfa_ioc_sync_leave(ioc);
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		} else {
+			if (bfa_ioc_sync_complete(ioc))
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			else {
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+			}
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_sem_timer_stop(ioc);
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_iocpf_timer_start(iocpf->ioc);
+	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
 }
 
 /*
@@ -985,46 +1087,77 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_DISABLE:
-		bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
 	case IOCPF_E_STOP:
-		bfa_iocpf_timer_stop(ioc);
 		bfa_ioc_firmware_unlock(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 		break;
 
-	case IOCPF_E_TIMEOUT:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-		break;
-
 	default:
		bfa_sm_fault(ioc, event);
 	}
 }
 
 static void
-bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
+bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
 {
-	/*
+	/**
 	 * Mark IOC as failed in hardware and stop firmware.
 	 */
 	bfa_ioc_lpu_stop(iocpf->ioc);
-	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
-
-	/*
-	 * Notify other functions on HB failure.
-	 */
-	bfa_ioc_notify_hbfail(iocpf->ioc);
 
-	/*
+	/**
 	 * Flush any queued up mailbox requests.
 	 */
 	bfa_ioc_mbox_hbfail(iocpf->ioc);
 
-	if (iocpf->auto_recover)
-		bfa_iocpf_recovery_timer_start(iocpf->ioc);
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		iocpf->retry_count = 0;
+		bfa_ioc_sync_ack(ioc);
+		bfa_ioc_notify_fail(ioc);
+		if (!iocpf->auto_recover) {
+			bfa_ioc_sync_leave(ioc);
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		} else {
+			if (bfa_ioc_sync_complete(ioc))
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			else {
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+			}
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
+{
 }
 
 /*
@@ -1039,15 +1172,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_DISABLE:
-		if (iocpf->auto_recover)
-			bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
-	case IOCPF_E_TIMEOUT:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-		break;
-
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -1438,7 +1565,6 @@ bfa_ioc_hb_check(void *cbarg)
 
 	hb_count = readl(ioc->ioc_regs.heartbeat);
 	if (ioc->hb_count == hb_count) {
-		printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
 		bfa_ioc_recover(ioc);
 		return;
 	} else {
@@ -2153,6 +2279,16 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 	return BFA_TRUE;
 }
 
+/**
+ * Reset IOC fwstate registers.
+ */
+void
+bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
+{
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+}
+
 #define BFA_MFG_NAME "Brocade"
 void
 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 62153f283216..5d2f34290e9d 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -149,8 +149,11 @@ struct bfa_ioc_regs_s {
 	void __iomem *host_page_num_fn;
 	void __iomem *heartbeat;
 	void __iomem *ioc_fwstate;
+	void __iomem *alt_ioc_fwstate;
 	void __iomem *ll_halt;
+	void __iomem *alt_ll_halt;
 	void __iomem *err_set;
+	void __iomem *ioc_fail_sync;
 	void __iomem *shirq_isr_next;
 	void __iomem *shirq_msk_next;
 	void __iomem *smem_page_start;
@@ -258,8 +261,12 @@ struct bfa_ioc_hwif_s {
 	void		(*ioc_map_port) (struct bfa_ioc_s *ioc);
 	void		(*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
					bfa_boolean_t msix);
-	void		(*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
+	void		(*ioc_notify_fail) (struct bfa_ioc_s *ioc);
 	void		(*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_join) (struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_leave) (struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_ack) (struct bfa_ioc_s *ioc);
+	bfa_boolean_t	(*ioc_sync_complete) (struct bfa_ioc_s *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)	((__ioc)->pcidev.pci_func)
@@ -289,6 +296,15 @@ struct bfa_ioc_hwif_s {
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	(off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)	(chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
 
+#ifdef BFA_IOC_IS_UEFI
+#define bfa_ioc_is_bios_optrom(__ioc)	(0)
+#define bfa_ioc_is_uefi(__ioc)		BFA_IOC_IS_UEFI
+#else
+#define bfa_ioc_is_bios_optrom(__ioc)	\
+	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+#define bfa_ioc_is_uefi(__ioc)		(0)
+#endif
+
 /*
  * IOC mailbox interface
  */
@@ -343,6 +359,7 @@ bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
+void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
 enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
 void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
 void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index a0e05da9df51..788ecca5aa01 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -30,8 +30,12 @@ static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_cb;
 
@@ -47,18 +51,38 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
 	hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
 	hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
 	hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
-	hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail;
+	hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
 	hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+	hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
+	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
+	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
+	hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
 
 	ioc->ioc_hwif = &hwif_cb;
 }
 
-/*
+/**
  * Return true if firmware of current driver matches the running firmware.
  */
 static bfa_boolean_t
 bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
 {
+	struct bfi_ioc_image_hdr_s fwhdr;
+	uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	if ((fwstate == BFI_IOC_UNINIT) || bfa_ioc_is_uefi(ioc) ||
+		bfa_ioc_is_bios_optrom(ioc))
+		return BFA_TRUE;
+
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+
+	if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
+		return BFA_TRUE;
+
+	bfa_trc(ioc, fwstate);
+	bfa_trc(ioc, fwhdr.exec);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+
 	return BFA_TRUE;
 }
 
@@ -71,7 +95,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
 {
 	writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 	readl(ioc->ioc_regs.err_set);
@@ -109,9 +133,11 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
 	}
 
 	/*
@@ -185,7 +211,68 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
 	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+/**
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
+{
+}
 
+static void
+bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
+{
+}
+
+static void
+bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
+{
+	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+}
+
+static bfa_boolean_t
+bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t fwstate, alt_fwstate;
+	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/**
+	 * At this point, this IOC is hoding the hw sem in the
+	 * start path (fwcheck) OR in the disable/enable path
+	 * OR to check if the other IOC has acknowledged failure.
+	 *
+	 * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
+	 * or in MEMTEST states. In a normal scenario, this IOC
+	 * can not be in OP state when this function is called.
+	 *
+	 * However, this IOC could still be in OP state when
+	 * the OS driver is starting up, if the OptROM code has
+	 * left it in that state.
+	 *
+	 * If we had marked this IOC's fwstate as BFI_IOC_FAIL
+	 * in the failure case and now, if the fwstate is not
+	 * BFI_IOC_FAIL it implies that the other PCI fn have
+	 * reinitialized the ASIC or this IOC got disabled, so
+	 * return TRUE.
+	 */
+	if (fwstate == BFI_IOC_UNINIT ||
+	    fwstate == BFI_IOC_INITING ||
+	    fwstate == BFI_IOC_DISABLED ||
+	    fwstate == BFI_IOC_MEMTEST ||
+	    fwstate == BFI_IOC_OP)
+		return BFA_TRUE;
+	else {
+		alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		if (alt_fwstate == BFI_IOC_FAIL ||
+		    alt_fwstate == BFI_IOC_DISABLED ||
+		    alt_fwstate == BFI_IOC_UNINIT ||
+		    alt_fwstate == BFI_IOC_INITING ||
+		    alt_fwstate == BFI_IOC_MEMTEST)
+			return BFA_TRUE;
+		else
+			return BFA_FALSE;
+	}
+}
 
 bfa_status_t
 bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
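Note that the Crossbow (cb) variant keeps no sync bitmap: sync_join and
sync_leave are no-ops, and completion is inferred from the two functions'
fwstate registers alone. A condensed sketch of the decision above
(hypothetical helper; only the six fwstate values shown are modeled, and over
that domain it matches the callback's truth table):

	#include <stdbool.h>

	enum fwstate { IOC_UNINIT, IOC_INITING, IOC_MEMTEST, IOC_OP,
		       IOC_DISABLED, IOC_FAIL };

	static bool cb_sync_complete(enum fwstate own, enum fwstate alt)
	{
		/* Our fwstate left FAIL: another fn already reinitialized
		 * the ASIC, or this IOC was disabled. */
		if (own != IOC_FAIL)
			return true;
		/* Otherwise wait until the partner is no longer
		 * operational, i.e. it has acknowledged the failure. */
		return alt != IOC_OP;
	}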
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 25a5d3c339c8..9da55a836bfb 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -22,6 +22,15 @@
 
 BFA_TRC_FILE(CNA, IOC_CT);
 
+#define bfa_ioc_ct_sync_pos(__ioc)	\
+		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH		16
+#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
+		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
 /*
  * forward declarations
 */
@@ -30,8 +39,12 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 
 static struct bfa_ioc_hwif_s hwif_ct;
 
@@ -47,8 +60,12 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
 	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
 	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
 	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+	hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
+	hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
+	hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
+	hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
 
 	ioc->ioc_hwif = &hwif_ct;
 }
@@ -85,6 +102,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	if (usecnt == 0) {
 		writel(1, ioc->ioc_regs.ioc_usage_reg);
 		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
 		bfa_trc(ioc, usecnt);
 		return BFA_TRUE;
 	}
@@ -153,12 +171,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 {
 	if (ioc->cna) {
 		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
 		/* Wait for halt to take effect */
 		readl(ioc->ioc_regs.ll_halt);
+		readl(ioc->ioc_regs.alt_ll_halt);
 	} else {
 		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 		readl(ioc->ioc_regs.err_set);
@@ -210,15 +230,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
 	}
 
 	/*
@@ -236,6 +260,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
 
 	/*
 	 * sram memory access
@@ -326,7 +351,77 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+/**
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
+
+	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
 
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+					bfa_ioc_ct_sync_pos(ioc);
+
+	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
+		ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bfa_boolean_t
+bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+	uint32_t tmp_ackd;
+
+	if (sync_ackd == 0)
+		return BFA_TRUE;
+
+	/**
+	 * The check below is to see whether any other PCI fn
+	 * has reinitialized the ASIC (reset sync_ackd bits)
+	 * and failed again while this IOC was waiting for hw
+	 * semaphore (in bfa_iocpf_sm_semwait()).
+	 */
+	tmp_ackd = sync_ackd;
+	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
+			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+	if (sync_reqd == sync_ackd) {
+		writel(bfa_ioc_ct_clear_sync_ackd(r32),
+			ioc->ioc_regs.ioc_fail_sync);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	/**
+	 * If another PCI fn reinitialized and failed again while
+	 * this IOC was waiting for hw sem, the sync_ackd bit for
+	 * this IOC need to be set again to allow reinitialization.
+	 */
+	if (tmp_ackd != sync_ackd)
+		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+	return BFA_FALSE;
+}
 
 /*
 * Check the firmware state to know if pll_init has been completed already
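On the Catapult (ct) ASIC the macros above pack both bitmaps into the single
ioc_fail_sync scratch register. A worked example of the encoding, as a
standalone host-side program with the same bit layout (names abbreviated from
the driver macros; not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define SYNC_POS(fn)	((uint32_t)1 << (fn))	/* ack bit */
	#define REQD_SH		16			/* reqd half shift */
	#define GET_ACKD(v)	((v) & 0x0000ffff)
	#define GET_REQD(v)	((v) >> REQD_SH)

	int main(void)
	{
		/* fns 0 and 2 joined; only fn 0 has acked so far */
		uint32_t r32 = ((SYNC_POS(0) | SYNC_POS(2)) << REQD_SH)
			| SYNC_POS(0);		/* r32 == 0x00050001 */

		printf("reqd=0x%04x ackd=0x%04x complete=%d\n",
			(unsigned)GET_REQD(r32), (unsigned)GET_ACKD(r32),
			GET_REQD(r32) == GET_ACKD(r32));	/* 0 */

		r32 |= SYNC_POS(2);		/* fn 2 acks the failure */
		printf("complete=%d\n",
			GET_REQD(r32) == GET_ACKD(r32));	/* 1 */
		return 0;
	}

Once reqd == ackd (0x00050005 here), one function clears the ackd half and
reinitializes the ASIC, which is exactly what bfa_ioc_ct_sync_complete() does.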
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
index 6f03ed382c69..39ad42b66b5b 100644
--- a/drivers/scsi/bfa/bfi_cbreg.h
+++ b/drivers/scsi/bfa/bfi_cbreg.h
@@ -208,6 +208,7 @@
 #define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT		HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
 
 #define CPE_Q_DEPTH(__n) \
 	(CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
index 62b86a4b0e4b..fc4ce4a5a183 100644
--- a/drivers/scsi/bfa/bfi_ctreg.h
+++ b/drivers/scsi/bfa/bfi_ctreg.h
@@ -522,6 +522,7 @@ enum {
 #define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT		HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
 
 #define CPE_DEPTH_Q(__n) \
 	(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -539,22 +540,30 @@ enum {
 	(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
 #define RME_CI_PTR_Q(__n) \
 	(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+#define HQM_QSET_RXQ_DRBL_P0(__n) \
+	(HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
+	(HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) \
+	(HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
+	(HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) \
+	(HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
+	(HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) \
+	(HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
+	(HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) \
+	(HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
+	(HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) \
+	(HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
+	(HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) \
+	(HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
+	(HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) \
+	(HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
+	(HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
 
 #define CPE_Q_NUM(__fn, __q)	(((__fn) << 2) + (__q))
 #define RME_Q_NUM(__fn, __q)	(((__fn) << 2) + (__q))