author:    Krishna Gudipati <kgudipat@brocade.com>      2010-03-05 22:34:20 -0500
committer: James Bottomley <James.Bottomley@suse.de>    2010-03-07 02:19:48 -0500
commit:    0a20de446c76529028cb239bf2a13cb0f05b263a (patch)
tree:      081a440b7e877da08a2f84e0bf8efed233457d11 /drivers/scsi/bfa/bfa_ioc.c
parent:    e67143243a1a6b47e1bdcda189ffac46d2a8744d (diff)
[SCSI] bfa: IOC changes: Support faster recovery and split bfa_ioc.c into ASIC specific code.
Add support for faster IOC recovery after failure.
Split bfa_ioc.c into three files:
bfa_ioc.c:    Common code shared between the crossbow and catapult ASICs.
bfa_ioc_cb.c: Code specific to the crossbow: register mapping and
              interrupt-related routines.
bfa_ioc_ct.c: Code specific to the catapult: register mapping and
              interrupt-related routines.
Fix to make sure the IOC reinitializes properly on an enable request:
update the ioc_fwstate register with BFI_IOC_FAIL when the ioc disable
mailbox command times out.
Makefile changes to support the two newly added files, bfa_ioc_cb.c and
bfa_ioc_ct.c.
Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/bfa/bfa_ioc.c')
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c  546
1 file changed, 97 insertions, 449 deletions
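Before the hunks themselves, a short sketch of the new per-ASIC indirection may help. The common code now dispatches through a function-pointer table reached via the IOC's ioc_hwif member (see the wrapper macros in the first large hunk below); bfa_ioc_cb.c and bfa_ioc_ct.c each install their own table. The struct name and layout below are inferred from those macros rather than copied from the driver headers, and they lean on the driver's own types (struct bfa_ioc_s, bfa_boolean_t, u32):

    /*
     * Approximate shape of the per-ASIC hook table.  Only the hooks that
     * appear in the wrapper macros of this diff are listed; the real
     * definition in the bfa headers may carry more.
     */
    struct bfa_ioc_hwif_s {
            bfa_boolean_t   (*ioc_firmware_lock)(struct bfa_ioc_s *ioc);
            void            (*ioc_firmware_unlock)(struct bfa_ioc_s *ioc);
            u32 *           (*ioc_fwimg_get_chunk)(struct bfa_ioc_s *ioc, u32 off);
            u32             (*ioc_fwimg_get_size)(struct bfa_ioc_s *ioc);
            void            (*ioc_reg_init)(struct bfa_ioc_s *ioc);
            void            (*ioc_map_port)(struct bfa_ioc_s *ioc);
            void            (*ioc_notify_hbfail)(struct bfa_ioc_s *ioc);
    };

The table is selected once in bfa_ioc_pci_init() (see the hunk near the end of this diff): catapult devices call bfa_ioc_set_ct_hwif(), crossbow devices call bfa_ioc_set_cb_hwif(), and from then on a call such as bfa_ioc_reg_init(ioc) expands to ioc->ioc_hwif->ioc_reg_init(ioc).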
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 569b35d19a25..a5f9745315b1 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -33,12 +33,11 @@ BFA_TRC_FILE(HAL, IOC); | |||
33 | * IOC local definitions | 33 | * IOC local definitions |
34 | */ | 34 | */ |
35 | #define BFA_IOC_TOV 2000 /* msecs */ | 35 | #define BFA_IOC_TOV 2000 /* msecs */ |
36 | #define BFA_IOC_HB_TOV 1000 /* msecs */ | 36 | #define BFA_IOC_HWSEM_TOV 500 /* msecs */ |
37 | #define BFA_IOC_HB_FAIL_MAX 4 | 37 | #define BFA_IOC_HB_TOV 500 /* msecs */ |
38 | #define BFA_IOC_HWINIT_MAX 2 | 38 | #define BFA_IOC_HWINIT_MAX 2 |
39 | #define BFA_IOC_FWIMG_MINSZ (16 * 1024) | 39 | #define BFA_IOC_FWIMG_MINSZ (16 * 1024) |
40 | #define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \ | 40 | #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV |
41 | + BFA_IOC_TOV) | ||
42 | 41 | ||
43 | #define bfa_ioc_timer_start(__ioc) \ | 42 | #define bfa_ioc_timer_start(__ioc) \ |
44 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ | 43 | bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ |
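The macro changes in the hunk above are where the "faster recovery" in the commit message comes from. A back-of-the-envelope comparison using only the values visible here, with illustrative OLD_/NEW_ names (the exact detection latency also depends on when the heartbeat timer last fired):

    /* Worst-case heartbeat-failure recovery window, in milliseconds. */
    enum {
            OLD_HB_TOV      = 1000,                 /* old BFA_IOC_HB_TOV      */
            OLD_HB_FAIL_MAX = 4,                    /* old BFA_IOC_HB_FAIL_MAX */
            OLD_IOC_TOV     = 2000,                 /* BFA_IOC_TOV (unchanged) */
            OLD_TOV_RECOVER = OLD_HB_FAIL_MAX * OLD_HB_TOV + OLD_IOC_TOV, /* 6000 */

            NEW_HB_TOV      = 500,                  /* new BFA_IOC_HB_TOV      */
            NEW_TOV_RECOVER = NEW_HB_TOV            /* 500: one missed beat    */
    };

The second half of the speed-up is in bfa_ioc_hb_check() further down: the hb_fail counter is gone, so a single heartbeat reading that has not advanced triggers bfa_ioc_recover() immediately instead of after BFA_IOC_HB_FAIL_MAX misses.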
@@ -51,11 +50,24 @@ BFA_TRC_FILE(HAL, IOC); | |||
51 | (sizeof(struct bfa_trc_mod_s) - \ | 50 | (sizeof(struct bfa_trc_mod_s) - \ |
52 | BFA_TRC_MAX * sizeof(struct bfa_trc_s))) | 51 | BFA_TRC_MAX * sizeof(struct bfa_trc_s))) |
53 | #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) | 52 | #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) |
54 | #define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) | ||
55 | 53 | ||
56 | #define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) | 54 | /** |
57 | #define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) | 55 | * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. |
58 | #define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) | 56 | */ |
57 | |||
58 | #define bfa_ioc_firmware_lock(__ioc) \ | ||
59 | ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) | ||
60 | #define bfa_ioc_firmware_unlock(__ioc) \ | ||
61 | ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) | ||
62 | #define bfa_ioc_fwimg_get_chunk(__ioc, __off) \ | ||
63 | ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off)) | ||
64 | #define bfa_ioc_fwimg_get_size(__ioc) \ | ||
65 | ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc)) | ||
66 | #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) | ||
67 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) | ||
68 | #define bfa_ioc_notify_hbfail(__ioc) \ | ||
69 | ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) | ||
70 | |||
59 | bfa_boolean_t bfa_auto_recover = BFA_TRUE; | 71 | bfa_boolean_t bfa_auto_recover = BFA_TRUE; |
60 | 72 | ||
61 | /* | 73 | /* |
@@ -64,7 +76,6 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE; | |||
64 | static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, | 76 | static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, |
65 | enum bfa_ioc_aen_event event); | 77 | enum bfa_ioc_aen_event event); |
66 | static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); | 78 | static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); |
67 | static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); | ||
68 | static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); | 79 | static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); |
69 | static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); | 80 | static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); |
70 | static void bfa_ioc_timeout(void *ioc); | 81 | static void bfa_ioc_timeout(void *ioc); |
@@ -77,8 +88,6 @@ static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); | |||
77 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); | 88 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); |
78 | static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); | 89 | static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); |
79 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); | 90 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); |
80 | static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc); | ||
81 | static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc); | ||
82 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); | 91 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); |
83 | static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); | 92 | static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); |
84 | 93 | ||
@@ -508,14 +517,19 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
508 | bfa_trc(ioc, event); | 517 | bfa_trc(ioc, event); |
509 | 518 | ||
510 | switch (event) { | 519 | switch (event) { |
511 | case IOC_E_HWERROR: | ||
512 | case IOC_E_FWRSP_DISABLE: | 520 | case IOC_E_FWRSP_DISABLE: |
513 | bfa_ioc_timer_stop(ioc); | 521 | bfa_ioc_timer_stop(ioc); |
522 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
523 | break; | ||
524 | |||
525 | case IOC_E_HWERROR: | ||
526 | bfa_ioc_timer_stop(ioc); | ||
514 | /* | 527 | /* |
515 | * !!! fall through !!! | 528 | * !!! fall through !!! |
516 | */ | 529 | */ |
517 | 530 | ||
518 | case IOC_E_TIMEOUT: | 531 | case IOC_E_TIMEOUT: |
532 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); | ||
519 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 533 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); |
520 | break; | 534 | break; |
521 | 535 | ||
@@ -608,15 +622,12 @@ bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) | |||
608 | * Mark IOC as failed in hardware and stop firmware. | 622 | * Mark IOC as failed in hardware and stop firmware. |
609 | */ | 623 | */ |
610 | bfa_ioc_lpu_stop(ioc); | 624 | bfa_ioc_lpu_stop(ioc); |
611 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL); | 625 | bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); |
612 | 626 | ||
613 | if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { | 627 | /** |
614 | bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); | 628 | * Notify other functions on HB failure. |
615 | /* | 629 | */ |
616 | * Wait for halt to take effect | 630 | bfa_ioc_notify_hbfail(ioc); |
617 | */ | ||
618 | bfa_reg_read(ioc->ioc_regs.ll_halt); | ||
619 | } | ||
620 | 631 | ||
621 | /** | 632 | /** |
622 | * Notify driver and common modules registered for notification. | 633 | * Notify driver and common modules registered for notification. |
@@ -672,6 +683,12 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
672 | */ | 683 | */ |
673 | break; | 684 | break; |
674 | 685 | ||
686 | case IOC_E_HWERROR: | ||
687 | /* | ||
688 | * HB failure notification, ignore. | ||
689 | */ | ||
690 | break; | ||
691 | |||
675 | default: | 692 | default: |
676 | bfa_sm_fault(ioc, event); | 693 | bfa_sm_fault(ioc, event); |
677 | } | 694 | } |
@@ -700,7 +717,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) | |||
700 | } | 717 | } |
701 | } | 718 | } |
702 | 719 | ||
703 | static void | 720 | void |
704 | bfa_ioc_sem_timeout(void *ioc_arg) | 721 | bfa_ioc_sem_timeout(void *ioc_arg) |
705 | { | 722 | { |
706 | struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; | 723 | struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; |
@@ -708,26 +725,32 @@ bfa_ioc_sem_timeout(void *ioc_arg) | |||
708 | bfa_ioc_hw_sem_get(ioc); | 725 | bfa_ioc_hw_sem_get(ioc); |
709 | } | 726 | } |
710 | 727 | ||
711 | static void | 728 | bfa_boolean_t |
712 | bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc) | 729 | bfa_ioc_sem_get(bfa_os_addr_t sem_reg) |
713 | { | 730 | { |
714 | u32 r32; | 731 | u32 r32; |
715 | int cnt = 0; | 732 | int cnt = 0; |
716 | #define BFA_SEM_SPINCNT 1000 | 733 | #define BFA_SEM_SPINCNT 3000 |
717 | 734 | ||
718 | do { | 735 | r32 = bfa_reg_read(sem_reg); |
719 | r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg); | 736 | |
737 | while (r32 && (cnt < BFA_SEM_SPINCNT)) { | ||
720 | cnt++; | 738 | cnt++; |
721 | if (cnt > BFA_SEM_SPINCNT) | 739 | bfa_os_udelay(2); |
722 | break; | 740 | r32 = bfa_reg_read(sem_reg); |
723 | } while (r32 != 0); | 741 | } |
742 | |||
743 | if (r32 == 0) | ||
744 | return BFA_TRUE; | ||
745 | |||
724 | bfa_assert(cnt < BFA_SEM_SPINCNT); | 746 | bfa_assert(cnt < BFA_SEM_SPINCNT); |
747 | return BFA_FALSE; | ||
725 | } | 748 | } |
726 | 749 | ||
727 | static void | 750 | void |
728 | bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc) | 751 | bfa_ioc_sem_release(bfa_os_addr_t sem_reg) |
729 | { | 752 | { |
730 | bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1); | 753 | bfa_reg_write(sem_reg, 1); |
731 | } | 754 | } |
732 | 755 | ||
733 | static void | 756 | static void |
@@ -737,7 +760,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) | |||
737 | 760 | ||
738 | /** | 761 | /** |
739 | * First read to the semaphore register will return 0, subsequent reads | 762 | * First read to the semaphore register will return 0, subsequent reads |
740 | * will return 1. Semaphore is released by writing 0 to the register | 763 | * will return 1. Semaphore is released by writing 1 to the register |
741 | */ | 764 | */ |
742 | r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); | 765 | r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); |
743 | if (r32 == 0) { | 766 | if (r32 == 0) { |
@@ -746,10 +769,10 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) | |||
746 | } | 769 | } |
747 | 770 | ||
748 | bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, | 771 | bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, |
749 | ioc, BFA_IOC_TOV); | 772 | ioc, BFA_IOC_HWSEM_TOV); |
750 | } | 773 | } |
751 | 774 | ||
752 | static void | 775 | void |
753 | bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) | 776 | bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) |
754 | { | 777 | { |
755 | bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); | 778 | bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); |
@@ -828,7 +851,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) | |||
828 | /** | 851 | /** |
829 | * Get driver and firmware versions. | 852 | * Get driver and firmware versions. |
830 | */ | 853 | */ |
831 | static void | 854 | void |
832 | bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | 855 | bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) |
833 | { | 856 | { |
834 | u32 pgnum, pgoff; | 857 | u32 pgnum, pgoff; |
@@ -847,24 +870,10 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | |||
847 | } | 870 | } |
848 | } | 871 | } |
849 | 872 | ||
850 | static u32 * | ||
851 | bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) | ||
852 | { | ||
853 | if (ioc->ctdev) | ||
854 | return bfi_image_ct_get_chunk(off); | ||
855 | return bfi_image_cb_get_chunk(off); | ||
856 | } | ||
857 | |||
858 | static u32 | ||
859 | bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc) | ||
860 | { | ||
861 | return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size; | ||
862 | } | ||
863 | |||
864 | /** | 873 | /** |
865 | * Returns TRUE if same. | 874 | * Returns TRUE if same. |
866 | */ | 875 | */ |
867 | static bfa_boolean_t | 876 | bfa_boolean_t |
868 | bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | 877 | bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) |
869 | { | 878 | { |
870 | struct bfi_ioc_image_hdr_s *drv_fwhdr; | 879 | struct bfi_ioc_image_hdr_s *drv_fwhdr; |
@@ -921,95 +930,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) | |||
921 | } | 930 | } |
922 | 931 | ||
923 | /** | 932 | /** |
924 | * Return true if firmware of current driver matches the running firmware. | ||
925 | */ | ||
926 | static bfa_boolean_t | ||
927 | bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc) | ||
928 | { | ||
929 | enum bfi_ioc_state ioc_fwstate; | ||
930 | u32 usecnt; | ||
931 | struct bfi_ioc_image_hdr_s fwhdr; | ||
932 | |||
933 | /** | ||
934 | * Firmware match check is relevant only for CNA. | ||
935 | */ | ||
936 | if (!ioc->cna) | ||
937 | return BFA_TRUE; | ||
938 | |||
939 | /** | ||
940 | * If bios boot (flash based) -- do not increment usage count | ||
941 | */ | ||
942 | if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) | ||
943 | return BFA_TRUE; | ||
944 | |||
945 | bfa_ioc_usage_sem_get(ioc); | ||
946 | usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); | ||
947 | |||
948 | /** | ||
949 | * If usage count is 0, always return TRUE. | ||
950 | */ | ||
951 | if (usecnt == 0) { | ||
952 | bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); | ||
953 | bfa_ioc_usage_sem_release(ioc); | ||
954 | bfa_trc(ioc, usecnt); | ||
955 | return BFA_TRUE; | ||
956 | } | ||
957 | |||
958 | ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); | ||
959 | bfa_trc(ioc, ioc_fwstate); | ||
960 | |||
961 | /** | ||
962 | * Use count cannot be non-zero and chip in uninitialized state. | ||
963 | */ | ||
964 | bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); | ||
965 | |||
966 | /** | ||
967 | * Check if another driver with a different firmware is active | ||
968 | */ | ||
969 | bfa_ioc_fwver_get(ioc, &fwhdr); | ||
970 | if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { | ||
971 | bfa_ioc_usage_sem_release(ioc); | ||
972 | bfa_trc(ioc, usecnt); | ||
973 | return BFA_FALSE; | ||
974 | } | ||
975 | |||
976 | /** | ||
977 | * Same firmware version. Increment the reference count. | ||
978 | */ | ||
979 | usecnt++; | ||
980 | bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); | ||
981 | bfa_ioc_usage_sem_release(ioc); | ||
982 | bfa_trc(ioc, usecnt); | ||
983 | return BFA_TRUE; | ||
984 | } | ||
985 | |||
986 | static void | ||
987 | bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc) | ||
988 | { | ||
989 | u32 usecnt; | ||
990 | |||
991 | /** | ||
992 | * Firmware lock is relevant only for CNA. | ||
993 | * If bios boot (flash based) -- do not decrement usage count | ||
994 | */ | ||
995 | if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)) | ||
996 | return; | ||
997 | |||
998 | /** | ||
999 | * decrement usage count | ||
1000 | */ | ||
1001 | bfa_ioc_usage_sem_get(ioc); | ||
1002 | usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); | ||
1003 | bfa_assert(usecnt > 0); | ||
1004 | |||
1005 | usecnt--; | ||
1006 | bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); | ||
1007 | bfa_trc(ioc, usecnt); | ||
1008 | |||
1009 | bfa_ioc_usage_sem_release(ioc); | ||
1010 | } | ||
1011 | |||
1012 | /** | ||
1013 | * Conditionally flush any pending message from firmware at start. | 933 | * Conditionally flush any pending message from firmware at start. |
1014 | */ | 934 | */ |
1015 | static void | 935 | static void |
@@ -1152,33 +1072,27 @@ bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) | |||
1152 | static void | 1072 | static void |
1153 | bfa_ioc_hb_check(void *cbarg) | 1073 | bfa_ioc_hb_check(void *cbarg) |
1154 | { | 1074 | { |
1155 | struct bfa_ioc_s *ioc = cbarg; | 1075 | struct bfa_ioc_s *ioc = cbarg; |
1156 | u32 hb_count; | 1076 | u32 hb_count; |
1157 | 1077 | ||
1158 | hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); | 1078 | hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); |
1159 | if (ioc->hb_count == hb_count) { | 1079 | if (ioc->hb_count == hb_count) { |
1160 | ioc->hb_fail++; | 1080 | bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, |
1161 | } else { | 1081 | hb_count); |
1162 | ioc->hb_count = hb_count; | ||
1163 | ioc->hb_fail = 0; | ||
1164 | } | ||
1165 | |||
1166 | if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) { | ||
1167 | bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count); | ||
1168 | ioc->hb_fail = 0; | ||
1169 | bfa_ioc_recover(ioc); | 1082 | bfa_ioc_recover(ioc); |
1170 | return; | 1083 | return; |
1084 | } else { | ||
1085 | ioc->hb_count = hb_count; | ||
1171 | } | 1086 | } |
1172 | 1087 | ||
1173 | bfa_ioc_mbox_poll(ioc); | 1088 | bfa_ioc_mbox_poll(ioc); |
1174 | bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, | 1089 | bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, |
1175 | BFA_IOC_HB_TOV); | 1090 | ioc, BFA_IOC_HB_TOV); |
1176 | } | 1091 | } |
1177 | 1092 | ||
1178 | static void | 1093 | static void |
1179 | bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) | 1094 | bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) |
1180 | { | 1095 | { |
1181 | ioc->hb_fail = 0; | ||
1182 | ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); | 1096 | ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); |
1183 | bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, | 1097 | bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, |
1184 | BFA_IOC_HB_TOV); | 1098 | BFA_IOC_HB_TOV); |
@@ -1191,112 +1105,6 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) | |||
1191 | } | 1105 | } |
1192 | 1106 | ||
1193 | /** | 1107 | /** |
1194 | * Host to LPU mailbox message addresses | ||
1195 | */ | ||
1196 | static struct { | ||
1197 | u32 hfn_mbox, lpu_mbox, hfn_pgn; | ||
1198 | } iocreg_fnreg[] = { | ||
1199 | { | ||
1200 | HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, { | ||
1201 | HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, { | ||
1202 | HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, { | ||
1203 | HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3} | ||
1204 | }; | ||
1205 | |||
1206 | /** | ||
1207 | * Host <-> LPU mailbox command/status registers - port 0 | ||
1208 | */ | ||
1209 | static struct { | ||
1210 | u32 hfn, lpu; | ||
1211 | } iocreg_mbcmd_p0[] = { | ||
1212 | { | ||
1213 | HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, { | ||
1214 | HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, { | ||
1215 | HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, { | ||
1216 | HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT} | ||
1217 | }; | ||
1218 | |||
1219 | /** | ||
1220 | * Host <-> LPU mailbox command/status registers - port 1 | ||
1221 | */ | ||
1222 | static struct { | ||
1223 | u32 hfn, lpu; | ||
1224 | } iocreg_mbcmd_p1[] = { | ||
1225 | { | ||
1226 | HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, { | ||
1227 | HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, { | ||
1228 | HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, { | ||
1229 | HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT} | ||
1230 | }; | ||
1231 | |||
1232 | /** | ||
1233 | * Shared IRQ handling in INTX mode | ||
1234 | */ | ||
1235 | static struct { | ||
1236 | u32 isr, msk; | ||
1237 | } iocreg_shirq_next[] = { | ||
1238 | { | ||
1239 | HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, { | ||
1240 | HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, { | ||
1241 | HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, { | ||
1242 | HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},}; | ||
1243 | |||
1244 | static void | ||
1245 | bfa_ioc_reg_init(struct bfa_ioc_s *ioc) | ||
1246 | { | ||
1247 | bfa_os_addr_t rb; | ||
1248 | int pcifn = bfa_ioc_pcifn(ioc); | ||
1249 | |||
1250 | rb = bfa_ioc_bar0(ioc); | ||
1251 | |||
1252 | ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; | ||
1253 | ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; | ||
1254 | ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; | ||
1255 | |||
1256 | if (ioc->port_id == 0) { | ||
1257 | ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; | ||
1258 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; | ||
1259 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; | ||
1260 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; | ||
1261 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; | ||
1262 | } else { | ||
1263 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); | ||
1264 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); | ||
1265 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; | ||
1266 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; | ||
1267 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; | ||
1268 | } | ||
1269 | |||
1270 | /** | ||
1271 | * Shared IRQ handling in INTX mode | ||
1272 | */ | ||
1273 | ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr; | ||
1274 | ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk; | ||
1275 | |||
1276 | /* | ||
1277 | * PSS control registers | ||
1278 | */ | ||
1279 | ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); | ||
1280 | ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); | ||
1281 | ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); | ||
1282 | |||
1283 | /* | ||
1284 | * IOC semaphore registers and serialization | ||
1285 | */ | ||
1286 | ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); | ||
1287 | ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); | ||
1288 | ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); | ||
1289 | |||
1290 | /** | ||
1291 | * sram memory access | ||
1292 | */ | ||
1293 | ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); | ||
1294 | ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB; | ||
1295 | if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) | ||
1296 | ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; | ||
1297 | } | ||
1298 | |||
1299 | /** | ||
1300 | * Initiate a full firmware download. | 1108 | * Initiate a full firmware download. |
1301 | */ | 1109 | */ |
1302 | static void | 1110 | static void |
@@ -1332,17 +1140,17 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, | |||
1332 | 1140 | ||
1333 | for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { | 1141 | for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { |
1334 | 1142 | ||
1335 | if (BFA_FLASH_CHUNK_NO(i) != chunkno) { | 1143 | if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { |
1336 | chunkno = BFA_FLASH_CHUNK_NO(i); | 1144 | chunkno = BFA_IOC_FLASH_CHUNK_NO(i); |
1337 | fwimg = bfa_ioc_fwimg_get_chunk(ioc, | 1145 | fwimg = bfa_ioc_fwimg_get_chunk(ioc, |
1338 | BFA_FLASH_CHUNK_ADDR(chunkno)); | 1146 | BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); |
1339 | } | 1147 | } |
1340 | 1148 | ||
1341 | /** | 1149 | /** |
1342 | * write smem | 1150 | * write smem |
1343 | */ | 1151 | */ |
1344 | bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, | 1152 | bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, |
1345 | fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]); | 1153 | fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); |
1346 | 1154 | ||
1347 | loff += sizeof(u32); | 1155 | loff += sizeof(u32); |
1348 | 1156 | ||
@@ -1440,168 +1248,10 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) | |||
1440 | } | 1248 | } |
1441 | 1249 | ||
1442 | /** | 1250 | /** |
1443 | * Initialize IOC to port mapping. | ||
1444 | */ | ||
1445 | |||
1446 | #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) | ||
1447 | static void | ||
1448 | bfa_ioc_map_port(struct bfa_ioc_s *ioc) | ||
1449 | { | ||
1450 | bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; | ||
1451 | u32 r32; | ||
1452 | |||
1453 | /** | ||
1454 | * For crossbow, port id is same as pci function. | ||
1455 | */ | ||
1456 | if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) { | ||
1457 | ioc->port_id = bfa_ioc_pcifn(ioc); | ||
1458 | return; | ||
1459 | } | ||
1460 | |||
1461 | /** | ||
1462 | * For catapult, base port id on personality register and IOC type | ||
1463 | */ | ||
1464 | r32 = bfa_reg_read(rb + FNC_PERS_REG); | ||
1465 | r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); | ||
1466 | ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; | ||
1467 | |||
1468 | bfa_trc(ioc, bfa_ioc_pcifn(ioc)); | ||
1469 | bfa_trc(ioc, ioc->port_id); | ||
1470 | } | ||
1471 | |||
1472 | |||
1473 | |||
1474 | /** | ||
1475 | * bfa_ioc_public | 1251 | * bfa_ioc_public |
1476 | */ | 1252 | */ |
1477 | 1253 | ||
1478 | /** | 1254 | /** |
1479 | * Set interrupt mode for a function: INTX or MSIX | ||
1480 | */ | ||
1481 | void | ||
1482 | bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) | ||
1483 | { | ||
1484 | bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; | ||
1485 | u32 r32, mode; | ||
1486 | |||
1487 | r32 = bfa_reg_read(rb + FNC_PERS_REG); | ||
1488 | bfa_trc(ioc, r32); | ||
1489 | |||
1490 | mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & | ||
1491 | __F0_INTX_STATUS; | ||
1492 | |||
1493 | /** | ||
1494 | * If already in desired mode, do not change anything | ||
1495 | */ | ||
1496 | if (!msix && mode) | ||
1497 | return; | ||
1498 | |||
1499 | if (msix) | ||
1500 | mode = __F0_INTX_STATUS_MSIX; | ||
1501 | else | ||
1502 | mode = __F0_INTX_STATUS_INTA; | ||
1503 | |||
1504 | r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); | ||
1505 | r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); | ||
1506 | bfa_trc(ioc, r32); | ||
1507 | |||
1508 | bfa_reg_write(rb + FNC_PERS_REG, r32); | ||
1509 | } | ||
1510 | |||
1511 | bfa_status_t | ||
1512 | bfa_ioc_pll_init(struct bfa_ioc_s *ioc) | ||
1513 | { | ||
1514 | bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; | ||
1515 | u32 pll_sclk, pll_fclk, r32; | ||
1516 | |||
1517 | if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { | ||
1518 | pll_sclk = | ||
1519 | __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | | ||
1520 | __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) | | ||
1521 | __APP_PLL_312_JITLMT0_1(3U) | | ||
1522 | __APP_PLL_312_CNTLMT0_1(1U); | ||
1523 | pll_fclk = | ||
1524 | __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | | ||
1525 | __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) | | ||
1526 | __APP_PLL_425_JITLMT0_1(3U) | | ||
1527 | __APP_PLL_425_CNTLMT0_1(1U); | ||
1528 | |||
1529 | /** | ||
1530 | * For catapult, choose operational mode FC/FCoE | ||
1531 | */ | ||
1532 | if (ioc->fcmode) { | ||
1533 | bfa_reg_write((rb + OP_MODE), 0); | ||
1534 | bfa_reg_write((rb + ETH_MAC_SER_REG), | ||
1535 | __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | ||
1536 | | __APP_EMS_CHANNEL_SEL); | ||
1537 | } else { | ||
1538 | ioc->pllinit = BFA_TRUE; | ||
1539 | bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); | ||
1540 | bfa_reg_write((rb + ETH_MAC_SER_REG), | ||
1541 | __APP_EMS_REFCKBUFEN1); | ||
1542 | } | ||
1543 | } else { | ||
1544 | pll_sclk = | ||
1545 | __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | | ||
1546 | __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) | | ||
1547 | __APP_PLL_312_CNTLMT0_1(3U); | ||
1548 | pll_fclk = | ||
1549 | __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | | ||
1550 | __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | | ||
1551 | __APP_PLL_425_JITLMT0_1(3U) | | ||
1552 | __APP_PLL_425_CNTLMT0_1(3U); | ||
1553 | } | ||
1554 | |||
1555 | bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); | ||
1556 | bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); | ||
1557 | |||
1558 | bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); | ||
1559 | bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); | ||
1560 | bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); | ||
1561 | bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); | ||
1562 | bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); | ||
1563 | bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); | ||
1564 | |||
1565 | bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, | ||
1566 | __APP_PLL_312_LOGIC_SOFT_RESET); | ||
1567 | bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, | ||
1568 | __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET); | ||
1569 | bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, | ||
1570 | __APP_PLL_425_LOGIC_SOFT_RESET); | ||
1571 | bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, | ||
1572 | __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET); | ||
1573 | bfa_os_udelay(2); | ||
1574 | bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, | ||
1575 | __APP_PLL_312_LOGIC_SOFT_RESET); | ||
1576 | bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, | ||
1577 | __APP_PLL_425_LOGIC_SOFT_RESET); | ||
1578 | |||
1579 | bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, | ||
1580 | pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET); | ||
1581 | bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, | ||
1582 | pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET); | ||
1583 | |||
1584 | /** | ||
1585 | * Wait for PLLs to lock. | ||
1586 | */ | ||
1587 | bfa_os_udelay(2000); | ||
1588 | bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); | ||
1589 | bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); | ||
1590 | |||
1591 | bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); | ||
1592 | bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); | ||
1593 | |||
1594 | if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { | ||
1595 | bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); | ||
1596 | bfa_os_udelay(1000); | ||
1597 | r32 = bfa_reg_read((rb + MBIST_STAT_REG)); | ||
1598 | bfa_trc(ioc, r32); | ||
1599 | } | ||
1600 | |||
1601 | return BFA_STATUS_OK; | ||
1602 | } | ||
1603 | |||
1604 | /** | ||
1605 | * Interface used by diag module to do firmware boot with memory test | 1255 | * Interface used by diag module to do firmware boot with memory test |
1606 | * as the entry vector. | 1256 | * as the entry vector. |
1607 | */ | 1257 | */ |
@@ -1764,6 +1414,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, | |||
1764 | ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); | 1414 | ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); |
1765 | ioc->cna = ioc->ctdev && !ioc->fcmode; | 1415 | ioc->cna = ioc->ctdev && !ioc->fcmode; |
1766 | 1416 | ||
1417 | /** | ||
1418 | * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c | ||
1419 | */ | ||
1420 | if (ioc->ctdev) | ||
1421 | bfa_ioc_set_ct_hwif(ioc); | ||
1422 | else | ||
1423 | bfa_ioc_set_cb_hwif(ioc); | ||
1424 | |||
1767 | bfa_ioc_map_port(ioc); | 1425 | bfa_ioc_map_port(ioc); |
1768 | bfa_ioc_reg_init(ioc); | 1426 | bfa_ioc_reg_init(ioc); |
1769 | } | 1427 | } |
@@ -1973,7 +1631,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) | |||
1973 | ((__sm) == BFI_IOC_INITING) || \ | 1631 | ((__sm) == BFI_IOC_INITING) || \ |
1974 | ((__sm) == BFI_IOC_HWINIT) || \ | 1632 | ((__sm) == BFI_IOC_HWINIT) || \ |
1975 | ((__sm) == BFI_IOC_DISABLED) || \ | 1633 | ((__sm) == BFI_IOC_DISABLED) || \ |
1976 | ((__sm) == BFI_IOC_HBFAIL) || \ | 1634 | ((__sm) == BFI_IOC_FAIL) || \ |
1977 | ((__sm) == BFI_IOC_CFG_DISABLED)) | 1635 | ((__sm) == BFI_IOC_CFG_DISABLED)) |
1978 | 1636 | ||
1979 | /** | 1637 | /** |
@@ -2195,29 +1853,6 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) | |||
2195 | } | 1853 | } |
2196 | 1854 | ||
2197 | /** | 1855 | /** |
2198 | * Return true if interrupt should be claimed. | ||
2199 | */ | ||
2200 | bfa_boolean_t | ||
2201 | bfa_ioc_intx_claim(struct bfa_ioc_s *ioc) | ||
2202 | { | ||
2203 | u32 isr, msk; | ||
2204 | |||
2205 | /** | ||
2206 | * Always claim if not catapult. | ||
2207 | */ | ||
2208 | if (!ioc->ctdev) | ||
2209 | return BFA_TRUE; | ||
2210 | |||
2211 | /** | ||
2212 | * FALSE if next device is claiming interrupt. | ||
2213 | * TRUE if next device is not interrupting or not present. | ||
2214 | */ | ||
2215 | msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next); | ||
2216 | isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next); | ||
2217 | return !(isr & ~msk); | ||
2218 | } | ||
2219 | |||
2220 | /** | ||
2221 | * Send AEN notification | 1856 | * Send AEN notification |
2222 | */ | 1857 | */ |
2223 | static void | 1858 | static void |
@@ -2304,6 +1939,13 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) | |||
2304 | 1939 | ||
2305 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | 1940 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); |
2306 | loff = bfa_ioc_smem_pgoff(ioc, loff); | 1941 | loff = bfa_ioc_smem_pgoff(ioc, loff); |
1942 | |||
1943 | /* | ||
1944 | * Hold semaphore to serialize pll init and fwtrc. | ||
1945 | */ | ||
1946 | if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) | ||
1947 | return BFA_STATUS_FAILED; | ||
1948 | |||
2307 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); | 1949 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); |
2308 | 1950 | ||
2309 | tlen = *trclen; | 1951 | tlen = *trclen; |
@@ -2329,6 +1971,12 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) | |||
2329 | } | 1971 | } |
2330 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, | 1972 | bfa_reg_write(ioc->ioc_regs.host_page_num_fn, |
2331 | bfa_ioc_smem_pgnum(ioc, 0)); | 1973 | bfa_ioc_smem_pgnum(ioc, 0)); |
1974 | |||
1975 | /* | ||
1976 | * release semaphore. | ||
1977 | */ | ||
1978 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | ||
1979 | |||
2332 | bfa_trc(ioc, pgnum); | 1980 | bfa_trc(ioc, pgnum); |
2333 | 1981 | ||
2334 | *trclen = tlen * sizeof(u32); | 1982 | *trclen = tlen * sizeof(u32); |