Diffstat (limited to 'drivers/net/bna')
-rw-r--r-- | drivers/net/bna/bfa_defs.h | 22
-rw-r--r-- | drivers/net/bna/bfa_defs_mfg_comm.h | 22
-rw-r--r-- | drivers/net/bna/bfa_ioc.c | 1219
-rw-r--r-- | drivers/net/bna/bfa_ioc.h | 49
-rw-r--r-- | drivers/net/bna/bfa_ioc_ct.c | 102
-rw-r--r-- | drivers/net/bna/bfi_ctreg.h | 41
-rw-r--r-- | drivers/net/bna/bna.h | 6
-rw-r--r-- | drivers/net/bna/bna_ctrl.c | 377
-rw-r--r-- | drivers/net/bna/bna_txrx.c | 44
-rw-r--r-- | drivers/net/bna/bna_types.h | 11
-rw-r--r-- | drivers/net/bna/bnad.c | 427
-rw-r--r-- | drivers/net/bna/bnad.h | 31
-rw-r--r-- | drivers/net/bna/bnad_ethtool.c | 9
13 files changed, 1439 insertions, 921 deletions
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h index 29c1b8de2c2d..2ea0dfe1cedc 100644 --- a/drivers/net/bna/bfa_defs.h +++ b/drivers/net/bna/bfa_defs.h | |||
@@ -112,16 +112,18 @@ struct bfa_ioc_pci_attr { | |||
112 | * IOC states | 112 | * IOC states |
113 | */ | 113 | */ |
114 | enum bfa_ioc_state { | 114 | enum bfa_ioc_state { |
115 | BFA_IOC_RESET = 1, /*!< IOC is in reset state */ | 115 | BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ |
116 | BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ | 116 | BFA_IOC_RESET = 2, /*!< IOC is in reset state */ |
117 | BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */ | 117 | BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */ |
118 | BFA_IOC_GETATTR = 4, /*!< IOC is being configured */ | 118 | BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */ |
119 | BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */ | 119 | BFA_IOC_GETATTR = 5, /*!< IOC is being configured */ |
120 | BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */ | 120 | BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */ |
121 | BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */ | 121 | BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */ |
122 | BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */ | 122 | BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */ |
123 | BFA_IOC_DISABLED = 9, /*!< IOC is disabled */ | 123 | BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */ |
124 | BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */ | 124 | BFA_IOC_DISABLED = 10, /*!< IOC is disabled */ |
125 | BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */ | ||
126 | BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */ | ||
125 | }; | 127 | }; |
126 | 128 | ||
127 | /** | 129 | /** |
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h index 987978fcb3fe..fdd677618361 100644 --- a/drivers/net/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/bna/bfa_defs_mfg_comm.h | |||
@@ -95,28 +95,6 @@ enum { | |||
95 | (type) == BFA_MFG_TYPE_CNA10P1 || \ | 95 | (type) == BFA_MFG_TYPE_CNA10P1 || \ |
96 | bfa_mfg_is_mezz(type))) | 96 | bfa_mfg_is_mezz(type))) |
97 | 97 | ||
98 | /** | ||
99 | * Check if the card has the old wwn/mac handling | ||
100 | */ | ||
101 | #define bfa_mfg_is_old_wwn_mac_model(type) (( \ | ||
102 | (type) == BFA_MFG_TYPE_FC8P2 || \ | ||
103 | (type) == BFA_MFG_TYPE_FC8P1 || \ | ||
104 | (type) == BFA_MFG_TYPE_FC4P2 || \ | ||
105 | (type) == BFA_MFG_TYPE_FC4P1 || \ | ||
106 | (type) == BFA_MFG_TYPE_CNA10P2 || \ | ||
107 | (type) == BFA_MFG_TYPE_CNA10P1 || \ | ||
108 | (type) == BFA_MFG_TYPE_JAYHAWK || \ | ||
109 | (type) == BFA_MFG_TYPE_WANCHESE)) | ||
110 | |||
111 | #define bfa_mfg_increment_wwn_mac(m, i) \ | ||
112 | do { \ | ||
113 | u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \ | ||
114 | t += (i); \ | ||
115 | (m)[0] = (t >> 16) & 0xFF; \ | ||
116 | (m)[1] = (t >> 8) & 0xFF; \ | ||
117 | (m)[2] = t & 0xFF; \ | ||
118 | } while (0) | ||
119 | |||
120 | #define bfa_mfg_adapter_prop_init_flash(card_type, prop) \ | 98 | #define bfa_mfg_adapter_prop_init_flash(card_type, prop) \ |
121 | do { \ | 99 | do { \ |
122 | switch ((card_type)) { \ | 100 | switch ((card_type)) { \ |
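For reference, the removed bfa_mfg_increment_wwn_mac() treats the low three bytes of a MAC (or WWN tail) as one 24-bit counter. A hedged usage sketch, with made-up values:

```c
/* Hypothetical example of the removed helper: derive the MAC for
 * port index 2 by bumping the base MAC's low three bytes. */
u8 mac[6] = { 0x00, 0x05, 0x1e, 0x00, 0x00, 0xfe };

bfa_mfg_increment_wwn_mac(&mac[3], 2);
/* 0x0000fe + 2 = 0x000100, so mac is now 00:05:1e:00:01:00 */
```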
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c index e94e5aa97515..34933cb9569f 100644 --- a/drivers/net/bna/bfa_ioc.c +++ b/drivers/net/bna/bfa_ioc.c | |||
@@ -26,25 +26,6 @@ | |||
26 | * IOC local definitions | 26 | * IOC local definitions |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define bfa_ioc_timer_start(__ioc) \ | ||
30 | mod_timer(&(__ioc)->ioc_timer, jiffies + \ | ||
31 | msecs_to_jiffies(BFA_IOC_TOV)) | ||
32 | #define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer) | ||
33 | |||
34 | #define bfa_ioc_recovery_timer_start(__ioc) \ | ||
35 | mod_timer(&(__ioc)->ioc_timer, jiffies + \ | ||
36 | msecs_to_jiffies(BFA_IOC_TOV_RECOVER)) | ||
37 | |||
38 | #define bfa_sem_timer_start(__ioc) \ | ||
39 | mod_timer(&(__ioc)->sem_timer, jiffies + \ | ||
40 | msecs_to_jiffies(BFA_IOC_HWSEM_TOV)) | ||
41 | #define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer) | ||
42 | |||
43 | #define bfa_hb_timer_start(__ioc) \ | ||
44 | mod_timer(&(__ioc)->hb_timer, jiffies + \ | ||
45 | msecs_to_jiffies(BFA_IOC_HB_TOV)) | ||
46 | #define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer) | ||
47 | |||
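These wrappers are dropped in favor of open-coding the timer calls at each call site; the replacement pattern, as it appears in the new code throughout the hunks below, is:

```c
/* Open-coded replacement for the deleted wrappers: */
mod_timer(&ioc->ioc_timer, jiffies + msecs_to_jiffies(BFA_IOC_TOV));
/* ... and, to cancel: */
del_timer(&ioc->ioc_timer);
```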
48 | /** | 29 | /** |
49 | * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. | 30 | * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. |
50 | */ | 31 | */ |
@@ -55,11 +36,16 @@ | |||
55 | ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) | 36 | ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) |
56 | #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) | 37 | #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) |
57 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) | 38 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) |
58 | #define bfa_ioc_notify_hbfail(__ioc) \ | 39 | #define bfa_ioc_notify_fail(__ioc) \ |
59 | ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) | 40 | ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) |
60 | 41 | #define bfa_ioc_sync_join(__ioc) \ | |
61 | #define bfa_ioc_is_optrom(__ioc) \ | 42 | ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) |
62 | (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) | 43 | #define bfa_ioc_sync_leave(__ioc) \ |
44 | ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) | ||
45 | #define bfa_ioc_sync_ack(__ioc) \ | ||
46 | ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) | ||
47 | #define bfa_ioc_sync_complete(__ioc) \ | ||
48 | ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) | ||
63 | 49 | ||
64 | #define bfa_ioc_mbox_cmd_pending(__ioc) \ | 50 | #define bfa_ioc_mbox_cmd_pending(__ioc) \ |
65 | (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ | 51 | (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ |
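The renamed ioc_notify_fail hook and the four new ioc_sync_*() hooks dispatch through the per-ASIC hwif ops table. A hedged sketch of how a back-end such as bfa_ioc_ct.c (changed elsewhere in this patch) might wire them up; the exact struct layout lives in bfa_ioc.h and the callback names here are illustrative:

```c
/* Sketch only: filling the new hwif hooks in an ASIC back-end. */
static struct bfa_ioc_hwif nw_hwif_ct;

void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	nw_hwif_ct.ioc_notify_fail   = bfa_ioc_ct_notify_fail;
	nw_hwif_ct.ioc_sync_join     = bfa_ioc_ct_sync_join;
	nw_hwif_ct.ioc_sync_leave    = bfa_ioc_ct_sync_leave;
	nw_hwif_ct.ioc_sync_ack      = bfa_ioc_ct_sync_ack;
	nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;

	ioc->ioc_hwif = &nw_hwif_ct;
}
```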
@@ -85,6 +71,12 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc); | |||
85 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); | 71 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); |
86 | static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); | 72 | static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); |
87 | static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); | 73 | static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); |
74 | static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); | ||
75 | static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); | ||
76 | static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); | ||
77 | static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc); | ||
78 | static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); | ||
79 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); | ||
88 | static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, | 80 | static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, |
89 | u32 boot_param); | 81 | u32 boot_param); |
90 | static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); | 82 | static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); |
@@ -101,72 +93,173 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, | |||
101 | char *manufacturer); | 93 | char *manufacturer); |
102 | static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); | 94 | static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); |
103 | static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); | 95 | static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); |
104 | static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc); | ||
105 | 96 | ||
106 | /** | 97 | /** |
107 | * IOC state machine events | 98 | * IOC state machine definitions/declarations |
108 | */ | 99 | */ |
109 | enum ioc_event { | 100 | enum ioc_event { |
110 | IOC_E_ENABLE = 1, /*!< IOC enable request */ | 101 | IOC_E_RESET = 1, /*!< IOC reset request */ |
111 | IOC_E_DISABLE = 2, /*!< IOC disable request */ | 102 | IOC_E_ENABLE = 2, /*!< IOC enable request */ |
112 | IOC_E_TIMEOUT = 3, /*!< f/w response timeout */ | 103 | IOC_E_DISABLE = 3, /*!< IOC disable request */ |
113 | IOC_E_FWREADY = 4, /*!< f/w initialization done */ | 104 | IOC_E_DETACH = 4, /*!< driver detach cleanup */ |
114 | IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */ | 105 | IOC_E_ENABLED = 5, /*!< f/w enabled */ |
115 | IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */ | 106 | IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */ |
116 | IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */ | 107 | IOC_E_DISABLED = 7, /*!< f/w disabled */ |
117 | IOC_E_HBFAIL = 8, /*!< heartbeat failure */ | 108 | IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */ |
118 | IOC_E_HWERROR = 9, /*!< hardware error interrupt */ | 109 | IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */ |
119 | IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ | 110 | IOC_E_HBFAIL = 10, /*!< heartbeat failure */ |
120 | IOC_E_DETACH = 11, /*!< driver detach cleanup */ | 111 | IOC_E_HWERROR = 11, /*!< hardware error interrupt */ |
112 | IOC_E_TIMEOUT = 12, /*!< timeout */ | ||
121 | }; | 113 | }; |
122 | 114 | ||
115 | bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event); | ||
123 | bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); | 116 | bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); |
124 | bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event); | ||
125 | bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event); | ||
126 | bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event); | ||
127 | bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event); | ||
128 | bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); | 117 | bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); |
129 | bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); | 118 | bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); |
130 | bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); | 119 | bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); |
131 | bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event); | 120 | bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event); |
132 | bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event); | 121 | bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event); |
133 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); | 122 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); |
134 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); | 123 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); |
135 | 124 | ||
136 | static struct bfa_sm_table ioc_sm_table[] = { | 125 | static struct bfa_sm_table ioc_sm_table[] = { |
126 | {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, | ||
137 | {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, | 127 | {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, |
138 | {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, | 128 | {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, |
139 | {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH}, | ||
140 | {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT}, | ||
141 | {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT}, | ||
142 | {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT}, | ||
143 | {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, | 129 | {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, |
144 | {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, | 130 | {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, |
145 | {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, | 131 | {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, |
146 | {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, | 132 | {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, |
147 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, | 133 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, |
148 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, | 134 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, |
149 | }; | 135 | }; |
150 | 136 | ||
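For readers new to this driver: bfa_fsm_state_decl() and the bfa_fsm_set_state()/bfa_fsm_send_event() calls used below are small macros from the bfa common headers. An illustrative sketch of the plumbing the tables above rely on (not the verbatim definitions):

```c
/* Each state contributes a handler and an entry action: */
#define bfa_fsm_state_decl(oc, st, otype, etype)		\
	static void oc ## _sm_ ## st(otype *fsm, etype event);	\
	static void oc ## _sm_ ## st ## _entry(otype *fsm)

/* Switching state stores the handler and runs the entry action: */
#define bfa_fsm_set_state(_fsm, _state) do {			\
	(_fsm)->fsm = (bfa_fsm_t)(_state);			\
	_state ## _entry(_fsm);					\
} while (0)

/* Events simply dispatch to the stored handler: */
#define bfa_fsm_send_event(_fsm, _event)			\
	((_fsm)->fsm((_fsm), (_event)))
```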
151 | /** | 137 | /** |
138 | * IOCPF state machine definitions/declarations | ||
139 | */ | ||
140 | |||
141 | /* | ||
142 | * Forward declarations for iocpf state machine | ||
143 | */ | ||
144 | static void bfa_iocpf_enable(struct bfa_ioc *ioc); | ||
145 | static void bfa_iocpf_disable(struct bfa_ioc *ioc); | ||
146 | static void bfa_iocpf_fail(struct bfa_ioc *ioc); | ||
147 | static void bfa_iocpf_initfail(struct bfa_ioc *ioc); | ||
148 | static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); | ||
149 | static void bfa_iocpf_stop(struct bfa_ioc *ioc); | ||
150 | |||
151 | /** | ||
152 | * IOCPF state machine events | ||
153 | */ | ||
154 | enum iocpf_event { | ||
155 | IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ | ||
156 | IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ | ||
157 | IOCPF_E_STOP = 3, /*!< stop on driver detach */ | ||
158 | IOCPF_E_FWREADY = 4, /*!< f/w initialization done */ | ||
159 | IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */ | ||
160 | IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */ | ||
161 | IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */ | ||
162 | IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */ | ||
163 | IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */ | ||
164 | IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ | ||
165 | IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */ | ||
166 | }; | ||
167 | |||
168 | /** | ||
169 | * IOCPF states | ||
170 | */ | ||
171 | enum bfa_iocpf_state { | ||
172 | BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ | ||
173 | BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ | ||
174 | BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */ | ||
175 | BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */ | ||
176 | BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */ | ||
177 | BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */ | ||
178 | BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */ | ||
179 | BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */ | ||
180 | BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */ | ||
181 | }; | ||
182 | |||
183 | bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event); | ||
184 | bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event); | ||
185 | bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event); | ||
186 | bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event); | ||
187 | bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event); | ||
188 | bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event); | ||
189 | bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event); | ||
190 | bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf, | ||
191 | enum iocpf_event); | ||
192 | bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event); | ||
193 | bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event); | ||
194 | bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event); | ||
195 | bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event); | ||
196 | bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, | ||
197 | enum iocpf_event); | ||
198 | bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); | ||
199 | |||
200 | static struct bfa_sm_table iocpf_sm_table[] = { | ||
201 | {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, | ||
202 | {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, | ||
203 | {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, | ||
204 | {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, | ||
205 | {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, | ||
206 | {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, | ||
207 | {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, | ||
208 | {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, | ||
209 | {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, | ||
210 | {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, | ||
211 | {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, | ||
212 | {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, | ||
213 | {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, | ||
214 | {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, | ||
215 | }; | ||
216 | |||
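Both *_sm_table arrays exist to map the current handler pointer back to an externally visible state value; a hedged sketch of the lookup side:

```c
/* Sketch: bfa_sm_to_state() scans the table for the entry whose
 * handler matches the FSM's current function pointer and returns
 * the associated state enum. */
static enum bfa_iocpf_state
bfa_iocpf_get_state(struct bfa_ioc *ioc)
{
	return bfa_sm_to_state(iocpf_sm_table, (bfa_sm_t)ioc->iocpf.fsm);
}
```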
217 | /** | ||
218 | * IOC State Machine | ||
219 | */ | ||
220 | |||
221 | /** | ||
222 | * Beginning state. IOC uninit state. | ||
223 | */ | ||
224 | static void | ||
225 | bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) | ||
226 | { | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * IOC is in uninit state. | ||
231 | */ | ||
232 | static void | ||
233 | bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) | ||
234 | { | ||
235 | switch (event) { | ||
236 | case IOC_E_RESET: | ||
237 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | ||
238 | break; | ||
239 | |||
240 | default: | ||
241 | bfa_sm_fault(ioc, event); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | /** | ||
152 | * Reset entry actions -- initialize state machine | 246 | * Reset entry actions -- initialize state machine |
153 | */ | 247 | */ |
154 | static void | 248 | static void |
155 | bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) | 249 | bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) |
156 | { | 250 | { |
157 | ioc->retry_count = 0; | 251 | bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); |
158 | ioc->auto_recover = bfa_nw_auto_recover; | ||
159 | } | 252 | } |
160 | 253 | ||
161 | /** | 254 | /** |
162 | * Beginning state. IOC is in reset state. | 255 | * IOC is in reset state. |
163 | */ | 256 | */ |
164 | static void | 257 | static void |
165 | bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) | 258 | bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) |
166 | { | 259 | { |
167 | switch (event) { | 260 | switch (event) { |
168 | case IOC_E_ENABLE: | 261 | case IOC_E_ENABLE: |
169 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); | 262 | bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); |
170 | break; | 263 | break; |
171 | 264 | ||
172 | case IOC_E_DISABLE: | 265 | case IOC_E_DISABLE: |
@@ -174,6 +267,7 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) | |||
174 | break; | 267 | break; |
175 | 268 | ||
176 | case IOC_E_DETACH: | 269 | case IOC_E_DETACH: |
270 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
177 | break; | 271 | break; |
178 | 272 | ||
179 | default: | 273 | default: |
@@ -181,42 +275,43 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) | |||
181 | } | 275 | } |
182 | } | 276 | } |
183 | 277 | ||
184 | /** | ||
185 | * Semaphore should be acquired for version check. | ||
186 | */ | ||
187 | static void | 278 | static void |
188 | bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc) | 279 | bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) |
189 | { | 280 | { |
190 | bfa_ioc_hw_sem_get(ioc); | 281 | bfa_iocpf_enable(ioc); |
191 | } | 282 | } |
192 | 283 | ||
193 | /** | 284 | /** |
194 | * Awaiting h/w semaphore to continue with version check. | 285 | * Host IOC function is being enabled, awaiting response from firmware. |
286 | * Semaphore is acquired. | ||
195 | */ | 287 | */ |
196 | static void | 288 | static void |
197 | bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event) | 289 | bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) |
198 | { | 290 | { |
199 | switch (event) { | 291 | switch (event) { |
200 | case IOC_E_SEMLOCKED: | 292 | case IOC_E_ENABLED: |
201 | if (bfa_ioc_firmware_lock(ioc)) { | 293 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); |
202 | ioc->retry_count = 0; | 294 | break; |
203 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); | 295 | |
204 | } else { | 296 | case IOC_E_PFAILED: |
205 | bfa_nw_ioc_hw_sem_release(ioc); | 297 | /* !!! fall through !!! */ |
206 | bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); | 298 | case IOC_E_HWERROR: |
207 | } | 299 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); |
300 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); | ||
301 | if (event != IOC_E_PFAILED) | ||
302 | bfa_iocpf_initfail(ioc); | ||
208 | break; | 303 | break; |
209 | 304 | ||
210 | case IOC_E_DISABLE: | 305 | case IOC_E_DISABLE: |
211 | bfa_ioc_disable_comp(ioc); | 306 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); |
212 | /* fall through */ | 307 | break; |
213 | 308 | ||
214 | case IOC_E_DETACH: | 309 | case IOC_E_DETACH: |
215 | bfa_ioc_hw_sem_get_cancel(ioc); | 310 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); |
216 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 311 | bfa_iocpf_stop(ioc); |
217 | break; | 312 | break; |
218 | 313 | ||
219 | case IOC_E_FWREADY: | 314 | case IOC_E_ENABLE: |
220 | break; | 315 | break; |
221 | 316 | ||
222 | default: | 317 | default: |
@@ -225,41 +320,85 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event) | |||
225 | } | 320 | } |
226 | 321 | ||
227 | /** | 322 | /** |
228 | * Notify enable completion callback and generate mismatch AEN. | 323 | * Semaphore should be acquired for version check. |
229 | */ | 324 | */ |
230 | static void | 325 | static void |
231 | bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc) | 326 | bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) |
232 | { | 327 | { |
233 | /** | 328 | mod_timer(&ioc->ioc_timer, jiffies + |
234 | * Provide enable completion callback and AEN notification only once. | 329 | msecs_to_jiffies(BFA_IOC_TOV)); |
235 | */ | 330 | bfa_ioc_send_getattr(ioc); |
236 | if (ioc->retry_count == 0) | ||
237 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
238 | ioc->retry_count++; | ||
239 | bfa_ioc_timer_start(ioc); | ||
240 | } | 331 | } |
241 | 332 | ||
242 | /** | 333 | /** |
243 | * Awaiting firmware version match. | 334 | * IOC configuration in progress. Timer is active. |
244 | */ | 335 | */ |
245 | static void | 336 | static void |
246 | bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event) | 337 | bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) |
247 | { | 338 | { |
248 | switch (event) { | 339 | switch (event) { |
340 | case IOC_E_FWRSP_GETATTR: | ||
341 | del_timer(&ioc->ioc_timer); | ||
342 | bfa_ioc_check_attr_wwns(ioc); | ||
343 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); | ||
344 | break; | ||
345 | |||
346 | case IOC_E_PFAILED: | ||
347 | case IOC_E_HWERROR: | ||
348 | del_timer(&ioc->ioc_timer); | ||
349 | /* fall through */ | ||
249 | case IOC_E_TIMEOUT: | 350 | case IOC_E_TIMEOUT: |
250 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); | 351 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); |
352 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); | ||
353 | if (event != IOC_E_PFAILED) | ||
354 | bfa_iocpf_getattrfail(ioc); | ||
251 | break; | 355 | break; |
252 | 356 | ||
253 | case IOC_E_DISABLE: | 357 | case IOC_E_DISABLE: |
254 | bfa_ioc_disable_comp(ioc); | 358 | del_timer(&ioc->ioc_timer); |
255 | /* fall through */ | 359 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); |
360 | break; | ||
256 | 361 | ||
257 | case IOC_E_DETACH: | 362 | case IOC_E_ENABLE: |
258 | bfa_ioc_timer_stop(ioc); | 363 | break; |
259 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 364 | |
365 | default: | ||
366 | bfa_sm_fault(ioc, event); | ||
367 | } | ||
368 | } | ||
369 | |||
370 | static void | ||
371 | bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) | ||
372 | { | ||
373 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); | ||
374 | bfa_ioc_hb_monitor(ioc); | ||
375 | } | ||
376 | |||
377 | static void | ||
378 | bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) | ||
379 | { | ||
380 | switch (event) { | ||
381 | case IOC_E_ENABLE: | ||
382 | break; | ||
383 | |||
384 | case IOC_E_DISABLE: | ||
385 | bfa_ioc_hb_stop(ioc); | ||
386 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
260 | break; | 387 | break; |
261 | 388 | ||
262 | case IOC_E_FWREADY: | 389 | case IOC_E_PFAILED: |
390 | case IOC_E_HWERROR: | ||
391 | bfa_ioc_hb_stop(ioc); | ||
392 | /* !!! fall through !!! */ | ||
393 | case IOC_E_HBFAIL: | ||
394 | bfa_ioc_fail_notify(ioc); | ||
395 | if (ioc->iocpf.auto_recover) | ||
396 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); | ||
397 | else | ||
398 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | ||
399 | |||
400 | if (event != IOC_E_PFAILED) | ||
401 | bfa_iocpf_fail(ioc); | ||
263 | break; | 402 | break; |
264 | 403 | ||
265 | default: | 404 | default: |
@@ -267,30 +406,61 @@ bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event) | |||
267 | } | 406 | } |
268 | } | 407 | } |
269 | 408 | ||
409 | static void | ||
410 | bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) | ||
411 | { | ||
412 | bfa_iocpf_disable(ioc); | ||
413 | } | ||
414 | |||
270 | /** | 415 | /** |
271 | * Request for semaphore. | 416 | * IOC is being disabled |
272 | */ | 417 | */ |
273 | static void | 418 | static void |
274 | bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc) | 419 | bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) |
275 | { | 420 | { |
276 | bfa_ioc_hw_sem_get(ioc); | 421 | switch (event) { |
422 | case IOC_E_DISABLED: | ||
423 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
424 | break; | ||
425 | |||
426 | case IOC_E_HWERROR: | ||
427 | /* | ||
428 | * No state change. Will move to disabled state | ||
429 | * after iocpf sm completes failure processing and | ||
430 | * moves to disabled state. | ||
431 | */ | ||
432 | bfa_iocpf_fail(ioc); | ||
433 | break; | ||
434 | |||
435 | default: | ||
436 | bfa_sm_fault(ioc, event); | ||
437 | } | ||
277 | } | 438 | } |
278 | 439 | ||
279 | /** | 440 | /** |
280 | * Awaiting semaphore for h/w initialization. | 441 | * IOC disable completion entry. |
281 | */ | 442 | */ |
282 | static void | 443 | static void |
283 | bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event) | 444 | bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) |
445 | { | ||
446 | bfa_ioc_disable_comp(ioc); | ||
447 | } | ||
448 | |||
449 | static void | ||
450 | bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) | ||
284 | { | 451 | { |
285 | switch (event) { | 452 | switch (event) { |
286 | case IOC_E_SEMLOCKED: | 453 | case IOC_E_ENABLE: |
287 | ioc->retry_count = 0; | 454 | bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); |
288 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); | ||
289 | break; | 455 | break; |
290 | 456 | ||
291 | case IOC_E_DISABLE: | 457 | case IOC_E_DISABLE: |
292 | bfa_ioc_hw_sem_get_cancel(ioc); | 458 | ioc->cbfn->disable_cbfn(ioc->bfa); |
293 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 459 | break; |
460 | |||
461 | case IOC_E_DETACH: | ||
462 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
463 | bfa_iocpf_stop(ioc); | ||
294 | break; | 464 | break; |
295 | 465 | ||
296 | default: | 466 | default: |
@@ -299,46 +469,45 @@ bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event) | |||
299 | } | 469 | } |
300 | 470 | ||
301 | static void | 471 | static void |
302 | bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc) | 472 | bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) |
303 | { | 473 | { |
304 | bfa_ioc_timer_start(ioc); | ||
305 | bfa_ioc_reset(ioc, false); | ||
306 | } | 474 | } |
307 | 475 | ||
308 | /** | 476 | /** |
309 | * @brief | 477 | * Hardware initialization retry. |
310 | * Hardware is being initialized. Interrupts are enabled. | ||
311 | * Holding hardware semaphore lock. | ||
312 | */ | 478 | */ |
313 | static void | 479 | static void |
314 | bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event) | 480 | bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) |
315 | { | 481 | { |
316 | switch (event) { | 482 | switch (event) { |
317 | case IOC_E_FWREADY: | 483 | case IOC_E_ENABLED: |
318 | bfa_ioc_timer_stop(ioc); | 484 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); |
319 | bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); | ||
320 | break; | 485 | break; |
321 | 486 | ||
487 | case IOC_E_PFAILED: | ||
322 | case IOC_E_HWERROR: | 488 | case IOC_E_HWERROR: |
323 | bfa_ioc_timer_stop(ioc); | 489 | /** |
324 | /* fall through */ | 490 | * Initialization retry failed. |
491 | */ | ||
492 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
493 | if (event != IOC_E_PFAILED) | ||
494 | bfa_iocpf_initfail(ioc); | ||
495 | break; | ||
325 | 496 | ||
326 | case IOC_E_TIMEOUT: | 497 | case IOC_E_INITFAILED: |
327 | ioc->retry_count++; | 498 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); |
328 | if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { | 499 | break; |
329 | bfa_ioc_timer_start(ioc); | ||
330 | bfa_ioc_reset(ioc, true); | ||
331 | break; | ||
332 | } | ||
333 | 500 | ||
334 | bfa_nw_ioc_hw_sem_release(ioc); | 501 | case IOC_E_ENABLE: |
335 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | ||
336 | break; | 502 | break; |
337 | 503 | ||
338 | case IOC_E_DISABLE: | 504 | case IOC_E_DISABLE: |
339 | bfa_nw_ioc_hw_sem_release(ioc); | 505 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); |
340 | bfa_ioc_timer_stop(ioc); | 506 | break; |
341 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 507 | |
508 | case IOC_E_DETACH: | ||
509 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
510 | bfa_iocpf_stop(ioc); | ||
342 | break; | 511 | break; |
343 | 512 | ||
344 | default: | 513 | default: |
@@ -347,51 +516,248 @@ bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event) | |||
347 | } | 516 | } |
348 | 517 | ||
349 | static void | 518 | static void |
350 | bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) | 519 | bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) |
351 | { | 520 | { |
352 | bfa_ioc_timer_start(ioc); | ||
353 | bfa_ioc_send_enable(ioc); | ||
354 | } | 521 | } |
355 | 522 | ||
356 | /** | 523 | /** |
357 | * Host IOC function is being enabled, awaiting response from firmware. | 524 | * IOC failure. |
358 | * Semaphore is acquired. | ||
359 | */ | 525 | */ |
360 | static void | 526 | static void |
361 | bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) | 527 | bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) |
362 | { | 528 | { |
363 | switch (event) { | 529 | switch (event) { |
364 | case IOC_E_FWRSP_ENABLE: | 530 | case IOC_E_ENABLE: |
365 | bfa_ioc_timer_stop(ioc); | 531 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); |
366 | bfa_nw_ioc_hw_sem_release(ioc); | 532 | break; |
367 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | 533 | |
534 | case IOC_E_DISABLE: | ||
535 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
536 | break; | ||
537 | |||
538 | case IOC_E_DETACH: | ||
539 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
540 | bfa_iocpf_stop(ioc); | ||
368 | break; | 541 | break; |
369 | 542 | ||
370 | case IOC_E_HWERROR: | 543 | case IOC_E_HWERROR: |
371 | bfa_ioc_timer_stop(ioc); | 544 | /* HB failure notification, ignore. */ |
372 | /* fall through */ | 545 | break; |
373 | 546 | ||
374 | case IOC_E_TIMEOUT: | 547 | default: |
375 | ioc->retry_count++; | 548 | bfa_sm_fault(ioc, event); |
376 | if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { | 549 | } |
377 | writel(BFI_IOC_UNINIT, | 550 | } |
378 | ioc->ioc_regs.ioc_fwstate); | 551 | |
379 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); | 552 | /** |
380 | break; | 553 | * IOCPF State Machine |
554 | */ | ||
555 | |||
556 | /** | ||
557 | * Reset entry actions -- initialize state machine | ||
558 | */ | ||
559 | static void | ||
560 | bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) | ||
561 | { | ||
562 | iocpf->retry_count = 0; | ||
563 | iocpf->auto_recover = bfa_nw_auto_recover; | ||
564 | } | ||
565 | |||
566 | /** | ||
567 | * Beginning state. IOC is in reset state. | ||
568 | */ | ||
569 | static void | ||
570 | bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) | ||
571 | { | ||
572 | switch (event) { | ||
573 | case IOCPF_E_ENABLE: | ||
574 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); | ||
575 | break; | ||
576 | |||
577 | case IOCPF_E_STOP: | ||
578 | break; | ||
579 | |||
580 | default: | ||
581 | bfa_sm_fault(iocpf->ioc, event); | ||
582 | } | ||
583 | } | ||
584 | |||
585 | /** | ||
586 | * Semaphore should be acquired for version check. | ||
587 | */ | ||
588 | static void | ||
589 | bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) | ||
590 | { | ||
591 | bfa_ioc_hw_sem_get(iocpf->ioc); | ||
592 | } | ||
593 | |||
594 | /** | ||
595 | * Awaiting h/w semaphore to continue with version check. | ||
596 | */ | ||
597 | static void | ||
598 | bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) | ||
599 | { | ||
600 | struct bfa_ioc *ioc = iocpf->ioc; | ||
601 | |||
602 | switch (event) { | ||
603 | case IOCPF_E_SEMLOCKED: | ||
604 | if (bfa_ioc_firmware_lock(ioc)) { | ||
605 | if (bfa_ioc_sync_complete(ioc)) { | ||
606 | iocpf->retry_count = 0; | ||
607 | bfa_ioc_sync_join(ioc); | ||
608 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
609 | } else { | ||
610 | bfa_ioc_firmware_unlock(ioc); | ||
611 | bfa_nw_ioc_hw_sem_release(ioc); | ||
612 | mod_timer(&ioc->sem_timer, jiffies + | ||
613 | msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); | ||
614 | } | ||
615 | } else { | ||
616 | bfa_nw_ioc_hw_sem_release(ioc); | ||
617 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); | ||
381 | } | 618 | } |
619 | break; | ||
382 | 620 | ||
383 | bfa_nw_ioc_hw_sem_release(ioc); | 621 | case IOCPF_E_DISABLE: |
384 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 622 | bfa_ioc_hw_sem_get_cancel(ioc); |
623 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | ||
624 | bfa_ioc_pf_disabled(ioc); | ||
385 | break; | 625 | break; |
386 | 626 | ||
387 | case IOC_E_DISABLE: | 627 | case IOCPF_E_STOP: |
388 | bfa_ioc_timer_stop(ioc); | 628 | bfa_ioc_hw_sem_get_cancel(ioc); |
629 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | ||
630 | break; | ||
631 | |||
632 | default: | ||
633 | bfa_sm_fault(ioc, event); | ||
634 | } | ||
635 | } | ||
636 | |||
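The sync_join/sync_leave/sync_ack/sync_complete calls above coordinate the PCI functions sharing one ASIC: conceptually, each PF owns a bit in a shared scratch register. A hedged model of the protocol (register name and bit layout are illustrative, not the real CT register map):

```c
/* Conceptual model of the sync hooks: join sets this PF's bit,
 * leave clears it, complete checks that no PF is still mid-
 * initialization or mid-recovery. */
static void
example_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel(r32 | (1 << bfa_ioc_pcifn(ioc)),
	       ioc->ioc_regs.ioc_fail_sync);
}

static bool
example_sync_complete(struct bfa_ioc *ioc)
{
	/* true once every joined PF has acked/left */
	return readl(ioc->ioc_regs.ioc_fail_sync) == 0;
}
```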
637 | /** | ||
638 | * Notify enable completion callback | ||
639 | */ | ||
640 | static void | ||
641 | bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) | ||
642 | { | ||
643 | /* Call only the first time sm enters fwmismatch state. */ | ||
644 | if (iocpf->retry_count == 0) | ||
645 | bfa_ioc_pf_fwmismatch(iocpf->ioc); | ||
646 | |||
647 | iocpf->retry_count++; | ||
648 | mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + | ||
649 | msecs_to_jiffies(BFA_IOC_TOV)); | ||
650 | } | ||
651 | |||
652 | /** | ||
653 | * Awaiting firmware version match. | ||
654 | */ | ||
655 | static void | ||
656 | bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) | ||
657 | { | ||
658 | struct bfa_ioc *ioc = iocpf->ioc; | ||
659 | |||
660 | switch (event) { | ||
661 | case IOCPF_E_TIMEOUT: | ||
662 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); | ||
663 | break; | ||
664 | |||
665 | case IOCPF_E_DISABLE: | ||
666 | del_timer(&ioc->iocpf_timer); | ||
667 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | ||
668 | bfa_ioc_pf_disabled(ioc); | ||
669 | break; | ||
670 | |||
671 | case IOCPF_E_STOP: | ||
672 | del_timer(&ioc->iocpf_timer); | ||
673 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | ||
674 | break; | ||
675 | |||
676 | default: | ||
677 | bfa_sm_fault(ioc, event); | ||
678 | } | ||
679 | } | ||
680 | |||
681 | /** | ||
682 | * Request for semaphore. | ||
683 | */ | ||
684 | static void | ||
685 | bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) | ||
686 | { | ||
687 | bfa_ioc_hw_sem_get(iocpf->ioc); | ||
688 | } | ||
689 | |||
690 | /** | ||
691 | * Awaiting semaphore for h/w initialization. | ||
692 | */ | ||
693 | static void | ||
694 | bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) | ||
695 | { | ||
696 | struct bfa_ioc *ioc = iocpf->ioc; | ||
697 | |||
698 | switch (event) { | ||
699 | case IOCPF_E_SEMLOCKED: | ||
700 | if (bfa_ioc_sync_complete(ioc)) { | ||
701 | bfa_ioc_sync_join(ioc); | ||
702 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
703 | } else { | ||
704 | bfa_nw_ioc_hw_sem_release(ioc); | ||
705 | mod_timer(&ioc->sem_timer, jiffies + | ||
706 | msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); | ||
707 | } | ||
708 | break; | ||
709 | |||
710 | case IOCPF_E_DISABLE: | ||
711 | bfa_ioc_hw_sem_get_cancel(ioc); | ||
712 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); | ||
713 | break; | ||
714 | |||
715 | default: | ||
716 | bfa_sm_fault(ioc, event); | ||
717 | } | ||
718 | } | ||
719 | |||
720 | static void | ||
721 | bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) | ||
722 | { | ||
723 | mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + | ||
724 | msecs_to_jiffies(BFA_IOC_TOV)); | ||
725 | bfa_ioc_reset(iocpf->ioc, 0); | ||
726 | } | ||
727 | |||
728 | /** | ||
729 | * Hardware is being initialized. Interrupts are enabled. | ||
730 | * Holding hardware semaphore lock. | ||
731 | */ | ||
732 | static void | ||
733 | bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) | ||
734 | { | ||
735 | struct bfa_ioc *ioc = iocpf->ioc; | ||
736 | |||
737 | switch (event) { | ||
738 | case IOCPF_E_FWREADY: | ||
739 | del_timer(&ioc->iocpf_timer); | ||
740 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); | ||
741 | break; | ||
742 | |||
743 | case IOCPF_E_INITFAIL: | ||
744 | del_timer(&ioc->iocpf_timer); | ||
745 | /* | ||
746 | * !!! fall through !!! | ||
747 | */ | ||
748 | |||
749 | case IOCPF_E_TIMEOUT: | ||
389 | bfa_nw_ioc_hw_sem_release(ioc); | 750 | bfa_nw_ioc_hw_sem_release(ioc); |
390 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 751 | if (event == IOCPF_E_TIMEOUT) |
752 | bfa_ioc_pf_failed(ioc); | ||
753 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); | ||
391 | break; | 754 | break; |
392 | 755 | ||
393 | case IOC_E_FWREADY: | 756 | case IOCPF_E_DISABLE: |
394 | bfa_ioc_send_enable(ioc); | 757 | del_timer(&ioc->iocpf_timer); |
758 | bfa_ioc_sync_leave(ioc); | ||
759 | bfa_nw_ioc_hw_sem_release(ioc); | ||
760 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | ||
395 | break; | 761 | break; |
396 | 762 | ||
397 | default: | 763 | default: |
@@ -400,37 +766,49 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) | |||
400 | } | 766 | } |
401 | 767 | ||
402 | static void | 768 | static void |
403 | bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) | 769 | bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) |
404 | { | 770 | { |
405 | bfa_ioc_timer_start(ioc); | 771 | mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + |
406 | bfa_ioc_send_getattr(ioc); | 772 | msecs_to_jiffies(BFA_IOC_TOV)); |
773 | bfa_ioc_send_enable(iocpf->ioc); | ||
407 | } | 774 | } |
408 | 775 | ||
409 | /** | 776 | /** |
410 | * @brief | 777 | * Host IOC function is being enabled, awaiting response from firmware. |
411 | * IOC configuration in progress. Timer is active. | 778 | * Semaphore is acquired. |
412 | */ | 779 | */ |
413 | static void | 780 | static void |
414 | bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) | 781 | bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) |
415 | { | 782 | { |
783 | struct bfa_ioc *ioc = iocpf->ioc; | ||
784 | |||
416 | switch (event) { | 785 | switch (event) { |
417 | case IOC_E_FWRSP_GETATTR: | 786 | case IOCPF_E_FWRSP_ENABLE: |
418 | bfa_ioc_timer_stop(ioc); | 787 | del_timer(&ioc->iocpf_timer); |
419 | bfa_ioc_check_attr_wwns(ioc); | 788 | bfa_nw_ioc_hw_sem_release(ioc); |
420 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); | 789 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); |
421 | break; | 790 | break; |
422 | 791 | ||
423 | case IOC_E_HWERROR: | 792 | case IOCPF_E_INITFAIL: |
424 | bfa_ioc_timer_stop(ioc); | 793 | del_timer(&ioc->iocpf_timer); |
425 | /* fall through */ | 794 | /* |
795 | * !!! fall through !!! | ||
796 | */ | ||
797 | case IOCPF_E_TIMEOUT: | ||
798 | bfa_nw_ioc_hw_sem_release(ioc); | ||
799 | if (event == IOCPF_E_TIMEOUT) | ||
800 | bfa_ioc_pf_failed(ioc); | ||
801 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); | ||
802 | break; | ||
426 | 803 | ||
427 | case IOC_E_TIMEOUT: | 804 | case IOCPF_E_DISABLE: |
428 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 805 | del_timer(&ioc->iocpf_timer); |
806 | bfa_nw_ioc_hw_sem_release(ioc); | ||
807 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); | ||
429 | break; | 808 | break; |
430 | 809 | ||
431 | case IOC_E_DISABLE: | 810 | case IOCPF_E_FWREADY: |
432 | bfa_ioc_timer_stop(ioc); | 811 | bfa_ioc_send_enable(ioc); |
433 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
434 | break; | 812 | break; |
435 | 813 | ||
436 | default: | 814 | default: |
@@ -438,36 +816,42 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) | |||
438 | } | 816 | } |
439 | } | 817 | } |
440 | 818 | ||
819 | static bool | ||
820 | bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) | ||
821 | { | ||
822 | return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); | ||
823 | } | ||
824 | |||
441 | static void | 825 | static void |
442 | bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) | 826 | bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf) |
443 | { | 827 | { |
444 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); | 828 | bfa_ioc_pf_enabled(iocpf->ioc); |
445 | bfa_ioc_hb_monitor(ioc); | ||
446 | } | 829 | } |
447 | 830 | ||
448 | static void | 831 | static void |
449 | bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) | 832 | bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) |
450 | { | 833 | { |
834 | struct bfa_ioc *ioc = iocpf->ioc; | ||
835 | |||
451 | switch (event) { | 836 | switch (event) { |
452 | case IOC_E_ENABLE: | 837 | case IOCPF_E_DISABLE: |
838 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); | ||
453 | break; | 839 | break; |
454 | 840 | ||
455 | case IOC_E_DISABLE: | 841 | case IOCPF_E_GETATTRFAIL: |
456 | bfa_ioc_hb_stop(ioc); | 842 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); |
457 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | ||
458 | break; | 843 | break; |
459 | 844 | ||
460 | case IOC_E_HWERROR: | 845 | case IOCPF_E_FAIL: |
461 | case IOC_E_FWREADY: | 846 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); |
462 | /** | 847 | break; |
463 | * Hard error or IOC recovery by other function. | ||
464 | * Treat it same as heartbeat failure. | ||
465 | */ | ||
466 | bfa_ioc_hb_stop(ioc); | ||
467 | /* !!! fall through !!! */ | ||
468 | 848 | ||
469 | case IOC_E_HBFAIL: | 849 | case IOCPF_E_FWREADY: |
470 | bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); | 850 | bfa_ioc_pf_failed(ioc); |
851 | if (bfa_nw_ioc_is_operational(ioc)) | ||
852 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); | ||
853 | else | ||
854 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); | ||
471 | break; | 855 | break; |
472 | 856 | ||
473 | default: | 857 | default: |
@@ -476,33 +860,40 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) | |||
476 | } | 860 | } |
477 | 861 | ||
478 | static void | 862 | static void |
479 | bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) | 863 | bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) |
480 | { | 864 | { |
481 | bfa_ioc_timer_start(ioc); | 865 | mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + |
482 | bfa_ioc_send_disable(ioc); | 866 | msecs_to_jiffies(BFA_IOC_TOV)); |
867 | bfa_ioc_send_disable(iocpf->ioc); | ||
483 | } | 868 | } |
484 | 869 | ||
485 | /** | 870 | /** |
486 | * IOC is being disabled | 871 | * IOC is being disabled |
487 | */ | 872 | */ |
488 | static void | 873 | static void |
489 | bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) | 874 | bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) |
490 | { | 875 | { |
876 | struct bfa_ioc *ioc = iocpf->ioc; | ||
877 | |||
491 | switch (event) { | 878 | switch (event) { |
492 | case IOC_E_FWRSP_DISABLE: | 879 | case IOCPF_E_FWRSP_DISABLE: |
493 | bfa_ioc_timer_stop(ioc); | 880 | case IOCPF_E_FWREADY: |
494 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 881 | del_timer(&ioc->iocpf_timer); |
882 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); | ||
495 | break; | 883 | break; |
496 | 884 | ||
497 | case IOC_E_HWERROR: | 885 | case IOCPF_E_FAIL: |
498 | bfa_ioc_timer_stop(ioc); | 886 | del_timer(&ioc->iocpf_timer); |
499 | /* | 887 | /* |
500 | * !!! fall through !!! | 888 | * !!! fall through !!! |
501 | */ | 889 | */ |
502 | 890 | ||
503 | case IOC_E_TIMEOUT: | 891 | case IOCPF_E_TIMEOUT: |
504 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | 892 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); |
505 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 893 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); |
894 | break; | ||
895 | |||
896 | case IOCPF_E_FWRSP_ENABLE: | ||
506 | break; | 897 | break; |
507 | 898 | ||
508 | default: | 899 | default: |
@@ -510,33 +901,58 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) | |||
510 | } | 901 | } |
511 | } | 902 | } |
512 | 903 | ||
513 | /** | ||
514 | * IOC disable completion entry. | ||
515 | */ | ||
516 | static void | 904 | static void |
517 | bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) | 905 | bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) |
518 | { | 906 | { |
519 | bfa_ioc_disable_comp(ioc); | 907 | bfa_ioc_hw_sem_get(iocpf->ioc); |
520 | } | 908 | } |
521 | 909 | ||
910 | /** | ||
911 | * IOC hb ack request is being removed. | ||
912 | */ | ||
522 | static void | 913 | static void |
523 | bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) | 914 | bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) |
524 | { | 915 | { |
916 | struct bfa_ioc *ioc = iocpf->ioc; | ||
917 | |||
525 | switch (event) { | 918 | switch (event) { |
526 | case IOC_E_ENABLE: | 919 | case IOCPF_E_SEMLOCKED: |
527 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | 920 | bfa_ioc_sync_leave(ioc); |
921 | bfa_nw_ioc_hw_sem_release(ioc); | ||
922 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | ||
528 | break; | 923 | break; |
529 | 924 | ||
530 | case IOC_E_DISABLE: | 925 | case IOCPF_E_FAIL: |
531 | ioc->cbfn->disable_cbfn(ioc->bfa); | ||
532 | break; | 926 | break; |
533 | 927 | ||
534 | case IOC_E_FWREADY: | 928 | default: |
929 | bfa_sm_fault(ioc, event); | ||
930 | } | ||
931 | } | ||
932 | |||
933 | /** | ||
934 | * IOC disable completion entry. | ||
935 | */ | ||
936 | static void | ||
937 | bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) | ||
938 | { | ||
939 | bfa_ioc_pf_disabled(iocpf->ioc); | ||
940 | } | ||
941 | |||
942 | static void | ||
943 | bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) | ||
944 | { | ||
945 | struct bfa_ioc *ioc = iocpf->ioc; | ||
946 | |||
947 | switch (event) { | ||
948 | case IOCPF_E_ENABLE: | ||
949 | iocpf->retry_count = 0; | ||
950 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | ||
535 | break; | 951 | break; |
536 | 952 | ||
537 | case IOC_E_DETACH: | 953 | case IOCPF_E_STOP: |
538 | bfa_ioc_firmware_unlock(ioc); | 954 | bfa_ioc_firmware_unlock(ioc); |
539 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 955 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
540 | break; | 956 | break; |
541 | 957 | ||
542 | default: | 958 | default: |
@@ -545,33 +961,50 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) | |||
545 | } | 961 | } |
546 | 962 | ||
547 | static void | 963 | static void |
548 | bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc) | 964 | bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) |
549 | { | 965 | { |
550 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 966 | bfa_ioc_hw_sem_get(iocpf->ioc); |
551 | bfa_ioc_timer_start(ioc); | ||
552 | } | 967 | } |
553 | 968 | ||
554 | /** | 969 | /** |
555 | * @brief | ||
556 | * Hardware initialization failed. | 970 | * Hardware initialization failed. |
557 | */ | 971 | */ |
558 | static void | 972 | static void |
559 | bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event) | 973 | bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) |
560 | { | 974 | { |
975 | struct bfa_ioc *ioc = iocpf->ioc; | ||
976 | |||
561 | switch (event) { | 977 | switch (event) { |
562 | case IOC_E_DISABLE: | 978 | case IOCPF_E_SEMLOCKED: |
563 | bfa_ioc_timer_stop(ioc); | 979 | bfa_ioc_notify_fail(ioc); |
564 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | 980 | bfa_ioc_sync_ack(ioc); |
981 | iocpf->retry_count++; | ||
982 | if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) { | ||
983 | bfa_ioc_sync_leave(ioc); | ||
984 | bfa_nw_ioc_hw_sem_release(ioc); | ||
985 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | ||
986 | } else { | ||
987 | if (bfa_ioc_sync_complete(ioc)) | ||
988 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
989 | else { | ||
990 | bfa_nw_ioc_hw_sem_release(ioc); | ||
991 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | ||
992 | } | ||
993 | } | ||
565 | break; | 994 | break; |
566 | 995 | ||
567 | case IOC_E_DETACH: | 996 | case IOCPF_E_DISABLE: |
568 | bfa_ioc_timer_stop(ioc); | 997 | bfa_ioc_hw_sem_get_cancel(ioc); |
998 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); | ||
999 | break; | ||
1000 | |||
1001 | case IOCPF_E_STOP: | ||
1002 | bfa_ioc_hw_sem_get_cancel(ioc); | ||
569 | bfa_ioc_firmware_unlock(ioc); | 1003 | bfa_ioc_firmware_unlock(ioc); |
570 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 1004 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
571 | break; | 1005 | break; |
572 | 1006 | ||
573 | case IOC_E_TIMEOUT: | 1007 | case IOCPF_E_FAIL: |
574 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | ||
575 | break; | 1008 | break; |
576 | 1009 | ||
577 | default: | 1010 | default: |
@@ -580,80 +1013,108 @@ bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event) | |||
580 | } | 1013 | } |
581 | 1014 | ||
582 | static void | 1015 | static void |
583 | bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc) | 1016 | bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) |
584 | { | 1017 | { |
585 | struct list_head *qe; | 1018 | bfa_ioc_pf_initfailed(iocpf->ioc); |
586 | struct bfa_ioc_hbfail_notify *notify; | 1019 | } |
587 | 1020 | ||
588 | /** | 1021 | /** |
589 | * Mark IOC as failed in hardware and stop firmware. | 1022 | * Hardware initialization failed. |
590 | */ | 1023 | */ |
591 | bfa_ioc_lpu_stop(ioc); | 1024 | static void |
592 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | 1025 | bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) |
1026 | { | ||
1027 | struct bfa_ioc *ioc = iocpf->ioc; | ||
593 | 1028 | ||
594 | /** | 1029 | switch (event) { |
595 | * Notify other functions on HB failure. | 1030 | case IOCPF_E_DISABLE: |
596 | */ | 1031 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
597 | bfa_ioc_notify_hbfail(ioc); | 1032 | break; |
598 | 1033 | ||
599 | /** | 1034 | case IOCPF_E_STOP: |
600 | * Notify driver and common modules registered for notification. | 1035 | bfa_ioc_firmware_unlock(ioc); |
601 | */ | 1036 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
602 | ioc->cbfn->hbfail_cbfn(ioc->bfa); | 1037 | break; |
603 | list_for_each(qe, &ioc->hb_notify_q) { | 1038 | |
604 | notify = (struct bfa_ioc_hbfail_notify *) qe; | 1039 | default: |
605 | notify->cbfn(notify->cbarg); | 1040 | bfa_sm_fault(ioc, event); |
606 | } | 1041 | } |
1042 | } | ||
607 | 1043 | ||
1044 | static void | ||
1045 | bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) | ||
1046 | { | ||
608 | /** | 1047 | /** |
609 | * Flush any queued up mailbox requests. | 1048 | * Mark IOC as failed in hardware and stop firmware. |
610 | */ | 1049 | */ |
611 | bfa_ioc_mbox_hbfail(ioc); | 1050 | bfa_ioc_lpu_stop(iocpf->ioc); |
612 | 1051 | ||
613 | /** | 1052 | /** |
614 | * Trigger auto-recovery after a delay. | 1053 | * Flush any queued up mailbox requests. |
615 | */ | 1054 | */ |
616 | if (ioc->auto_recover) | 1055 | bfa_ioc_mbox_hbfail(iocpf->ioc); |
617 | mod_timer(&ioc->ioc_timer, jiffies + | 1056 | bfa_ioc_hw_sem_get(iocpf->ioc); |
618 | msecs_to_jiffies(BFA_IOC_TOV_RECOVER)); | ||
619 | } | 1057 | } |
620 | 1058 | ||
621 | /** | 1059 | /** |
622 | * @brief | 1060 | * IOC is in failed state. |
623 | * IOC heartbeat failure. | ||
624 | */ | 1061 | */ |
625 | static void | 1062 | static void |
626 | bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event) | 1063 | bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) |
627 | { | 1064 | { |
628 | switch (event) { | 1065 | struct bfa_ioc *ioc = iocpf->ioc; |
629 | 1066 | ||
630 | case IOC_E_ENABLE: | 1067 | switch (event) { |
631 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 1068 | case IOCPF_E_SEMLOCKED: |
1069 | iocpf->retry_count = 0; | ||
1070 | bfa_ioc_sync_ack(ioc); | ||
1071 | bfa_ioc_notify_fail(ioc); | ||
1072 | if (!iocpf->auto_recover) { | ||
1073 | bfa_ioc_sync_leave(ioc); | ||
1074 | bfa_nw_ioc_hw_sem_release(ioc); | ||
1075 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); | ||
1076 | } else { | ||
1077 | if (bfa_ioc_sync_complete(ioc)) | ||
1078 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
1079 | else { | ||
1080 | bfa_nw_ioc_hw_sem_release(ioc); | ||
1081 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | ||
1082 | } | ||
1083 | } | ||
632 | break; | 1084 | break; |
633 | 1085 | ||
634 | case IOC_E_DISABLE: | 1086 | case IOCPF_E_DISABLE: |
635 | if (ioc->auto_recover) | 1087 | bfa_ioc_hw_sem_get_cancel(ioc); |
636 | bfa_ioc_timer_stop(ioc); | 1088 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); |
637 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); | ||
638 | break; | 1089 | break; |
639 | 1090 | ||
640 | case IOC_E_TIMEOUT: | 1091 | case IOCPF_E_FAIL: |
641 | bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); | ||
642 | break; | 1092 | break; |
643 | 1093 | ||
644 | case IOC_E_FWREADY: | 1094 | default: |
645 | /** | 1095 | bfa_sm_fault(ioc, event); |
646 | * Recovery is already initiated by other function. | 1096 | } |
647 | */ | 1097 | } |
648 | break; | ||
649 | 1098 | ||
650 | case IOC_E_HWERROR: | 1099 | static void |
651 | /* | 1100 | bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) |
652 | * HB failure notification, ignore. | 1101 | { |
653 | */ | 1102 | } |
1103 | |||
1104 | /** | ||
1105 | * @brief | ||
1106 | * IOC is in failed state. | ||
1107 | */ | ||
1108 | static void | ||
1109 | bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) | ||
1110 | { | ||
1111 | switch (event) { | ||
1112 | case IOCPF_E_DISABLE: | ||
1113 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | ||
654 | break; | 1114 | break; |
1115 | |||
655 | default: | 1116 | default: |
656 | bfa_sm_fault(ioc, event); | 1117 | bfa_sm_fault(iocpf->ioc, event); |
657 | } | 1118 | } |
658 | } | 1119 | } |
659 | 1120 | ||
@@ -678,14 +1139,6 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc) | |||
678 | } | 1139 | } |
679 | } | 1140 | } |
680 | 1141 | ||
681 | void | ||
682 | bfa_nw_ioc_sem_timeout(void *ioc_arg) | ||
683 | { | ||
684 | struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; | ||
685 | |||
686 | bfa_ioc_hw_sem_get(ioc); | ||
687 | } | ||
688 | |||
689 | bool | 1142 | bool |
690 | bfa_nw_ioc_sem_get(void __iomem *sem_reg) | 1143 | bfa_nw_ioc_sem_get(void __iomem *sem_reg) |
691 | { | 1144 | { |
@@ -725,7 +1178,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc) | |||
725 | */ | 1178 | */ |
726 | r32 = readl(ioc->ioc_regs.ioc_sem_reg); | 1179 | r32 = readl(ioc->ioc_regs.ioc_sem_reg); |
727 | if (r32 == 0) { | 1180 | if (r32 == 0) { |
728 | bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); | 1181 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); |
729 | return; | 1182 | return; |
730 | } | 1183 | } |
731 | 1184 | ||
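Context for the hunk above: the hardware semaphore is read-to-acquire and write-to-release — a read returns 0 only when that read itself took the lock. A minimal sketch of the protocol:

```c
/* Hedged sketch of the h/w semaphore protocol assumed here: */
if (readl(ioc->ioc_regs.ioc_sem_reg) == 0) {
	/* acquired: proceed (the FSM gets IOCPF_E_SEMLOCKED) */
} else {
	/* busy: retry after BFA_IOC_HWSEM_TOV via ioc->sem_timer */
}
/* ... later, release with: */
writel(1, ioc->ioc_regs.ioc_sem_reg);
```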
@@ -865,12 +1318,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc) | |||
865 | { | 1318 | { |
866 | struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; | 1319 | struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; |
867 | 1320 | ||
868 | /** | ||
869 | * If bios/efi boot (flash based) -- return true | ||
870 | */ | ||
871 | if (bfa_ioc_is_optrom(ioc)) | ||
872 | return true; | ||
873 | |||
874 | bfa_nw_ioc_fwver_get(ioc, &fwhdr); | 1321 | bfa_nw_ioc_fwver_get(ioc, &fwhdr); |
875 | drv_fwhdr = (struct bfi_ioc_image_hdr *) | 1322 | drv_fwhdr = (struct bfi_ioc_image_hdr *) |
876 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); | 1323 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); |
@@ -934,20 +1381,15 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) | |||
934 | /** | 1381 | /** |
935 | * If IOC function is disabled and firmware version is same, | 1382 | * If IOC function is disabled and firmware version is same, |
936 | * just re-enable IOC. | 1383 | * just re-enable IOC. |
937 | * | ||
938 | * If option rom, IOC must not be in operational state. With | ||
939 | * convergence, IOC will be in operational state when 2nd driver | ||
940 | * is loaded. | ||
941 | */ | 1384 | */ |
942 | if (ioc_fwstate == BFI_IOC_DISABLED || | 1385 | if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { |
943 | (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { | ||
944 | /** | 1386 | /** |
945 | * When using MSI-X any pending firmware ready event should | 1387 | * When using MSI-X any pending firmware ready event should |
946 | * be flushed. Otherwise MSI-X interrupts are not delivered. | 1388 | * be flushed. Otherwise MSI-X interrupts are not delivered. |
947 | */ | 1389 | */ |
948 | bfa_ioc_msgflush(ioc); | 1390 | bfa_ioc_msgflush(ioc); |
949 | ioc->cbfn->reset_cbfn(ioc->bfa); | 1391 | ioc->cbfn->reset_cbfn(ioc->bfa); |
950 | bfa_fsm_send_event(ioc, IOC_E_FWREADY); | 1392 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); |
951 | return; | 1393 | return; |
952 | } | 1394 | } |
953 | 1395 | ||
@@ -1033,7 +1475,6 @@ bfa_nw_ioc_hb_check(void *cbarg) | |||
1033 | 1475 | ||
1034 | hb_count = readl(ioc->ioc_regs.heartbeat); | 1476 | hb_count = readl(ioc->ioc_regs.heartbeat); |
1035 | if (ioc->hb_count == hb_count) { | 1477 | if (ioc->hb_count == hb_count) { |
1036 | pr_crit("Firmware heartbeat failure at %d", hb_count); | ||
1037 | bfa_ioc_recover(ioc); | 1478 | bfa_ioc_recover(ioc); |
1038 | return; | 1479 | return; |
1039 | } else { | 1480 | } else { |
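bfa_nw_ioc_hb_check() is a plain watchdog: a periodic timer samples a counter that the firmware increments, and an unchanged sample means the firmware stalled, at which point the hunk now defers the pr_crit() to bfa_ioc_recover() (changed near the end of this file). The comparison, isolated into a runnable sketch:

#include <stdint.h>
#include <stdio.h>

struct hb_monitor {
        uint32_t last_count;
};

/* Returns 1 if the firmware made progress since the previous tick,
 * 0 if the counter stalled (heartbeat failure -> recover). */
static int hb_check(struct hb_monitor *m, uint32_t hw_count)
{
        if (hw_count == m->last_count)
                return 0;
        m->last_count = hw_count;
        return 1;
}

int main(void)
{
        struct hb_monitor m = { 0 };

        printf("%d\n", hb_check(&m, 1));        /* 1: progressing */
        printf("%d\n", hb_check(&m, 1));        /* 0: stalled */
        return 0;
}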
@@ -1078,11 +1519,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, | |||
1078 | */ | 1519 | */ |
1079 | bfa_ioc_lmem_init(ioc); | 1520 | bfa_ioc_lmem_init(ioc); |
1080 | 1521 | ||
1081 | /** | ||
1082 | * Flash based firmware boot | ||
1083 | */ | ||
1084 | if (bfa_ioc_is_optrom(ioc)) | ||
1085 | boot_type = BFI_BOOT_TYPE_FLASH; | ||
1086 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); | 1522 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); |
1087 | 1523 | ||
1088 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | 1524 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); |
@@ -1209,6 +1645,55 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc) | |||
1209 | bfa_q_deq(&mod->cmd_q, &cmd); | 1645 | bfa_q_deq(&mod->cmd_q, &cmd); |
1210 | } | 1646 | } |
1211 | 1647 | ||
1648 | static void | ||
1649 | bfa_ioc_fail_notify(struct bfa_ioc *ioc) | ||
1650 | { | ||
1651 | struct list_head *qe; | ||
1652 | struct bfa_ioc_hbfail_notify *notify; | ||
1653 | |||
1654 | /** | ||
1655 | * Notify driver and common modules registered for notification. | ||
1656 | */ | ||
1657 | ioc->cbfn->hbfail_cbfn(ioc->bfa); | ||
1658 | list_for_each(qe, &ioc->hb_notify_q) { | ||
1659 | notify = (struct bfa_ioc_hbfail_notify *) qe; | ||
1660 | notify->cbfn(notify->cbarg); | ||
1661 | } | ||
1662 | } | ||
1663 | |||
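bfa_ioc_fail_notify() walks hb_notify_q, a list_head chain of bfa_ioc_hbfail_notify entries, invoking each registered callback. The same shape with a plain singly linked list so it runs outside the kernel; the names here are illustrative only:

#include <stdio.h>

struct notify {
        struct notify *next;
        void (*cbfn)(void *cbarg);
        void *cbarg;
};

/* Walk the registered notifiers and fire each callback, as
 * bfa_ioc_fail_notify() does over hb_notify_q. */
static void fail_notify(struct notify *head)
{
        struct notify *n;

        for (n = head; n; n = n->next)
                n->cbfn(n->cbarg);
}

static void say(void *arg)
{
        printf("%s notified\n", (const char *)arg);
}

int main(void)
{
        struct notify b = { NULL, say, "ethport" };
        struct notify a = { &b, say, "cee" };

        fail_notify(&a);
        return 0;
}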
1664 | static void | ||
1665 | bfa_ioc_pf_enabled(struct bfa_ioc *ioc) | ||
1666 | { | ||
1667 | bfa_fsm_send_event(ioc, IOC_E_ENABLED); | ||
1668 | } | ||
1669 | |||
1670 | static void | ||
1671 | bfa_ioc_pf_disabled(struct bfa_ioc *ioc) | ||
1672 | { | ||
1673 | bfa_fsm_send_event(ioc, IOC_E_DISABLED); | ||
1674 | } | ||
1675 | |||
1676 | static void | ||
1677 | bfa_ioc_pf_initfailed(struct bfa_ioc *ioc) | ||
1678 | { | ||
1679 | bfa_fsm_send_event(ioc, IOC_E_INITFAILED); | ||
1680 | } | ||
1681 | |||
1682 | static void | ||
1683 | bfa_ioc_pf_failed(struct bfa_ioc *ioc) | ||
1684 | { | ||
1685 | bfa_fsm_send_event(ioc, IOC_E_PFAILED); | ||
1686 | } | ||
1687 | |||
1688 | static void | ||
1689 | bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) | ||
1690 | { | ||
1691 | /** | ||
1692 | * Provide enable completion callback and AEN notification. | ||
1693 | */ | ||
1694 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | ||
1695 | } | ||
1696 | |||
1212 | /** | 1697 | /** |
1213 | * IOC public | 1698 | * IOC public |
1214 | */ | 1699 | */ |
@@ -1304,6 +1789,7 @@ static void | |||
1304 | bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) | 1789 | bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) |
1305 | { | 1790 | { |
1306 | union bfi_ioc_i2h_msg_u *msg; | 1791 | union bfi_ioc_i2h_msg_u *msg; |
1792 | struct bfa_iocpf *iocpf = &ioc->iocpf; | ||
1307 | 1793 | ||
1308 | msg = (union bfi_ioc_i2h_msg_u *) m; | 1794 | msg = (union bfi_ioc_i2h_msg_u *) m; |
1309 | 1795 | ||
@@ -1314,15 +1800,15 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) | |||
1314 | break; | 1800 | break; |
1315 | 1801 | ||
1316 | case BFI_IOC_I2H_READY_EVENT: | 1802 | case BFI_IOC_I2H_READY_EVENT: |
1317 | bfa_fsm_send_event(ioc, IOC_E_FWREADY); | 1803 | bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY); |
1318 | break; | 1804 | break; |
1319 | 1805 | ||
1320 | case BFI_IOC_I2H_ENABLE_REPLY: | 1806 | case BFI_IOC_I2H_ENABLE_REPLY: |
1321 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); | 1807 | bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); |
1322 | break; | 1808 | break; |
1323 | 1809 | ||
1324 | case BFI_IOC_I2H_DISABLE_REPLY: | 1810 | case BFI_IOC_I2H_DISABLE_REPLY: |
1325 | bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); | 1811 | bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); |
1326 | break; | 1812 | break; |
1327 | 1813 | ||
1328 | case BFI_IOC_I2H_GETATTR_REPLY: | 1814 | case BFI_IOC_I2H_GETATTR_REPLY: |
@@ -1348,11 +1834,13 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) | |||
1348 | ioc->fcmode = false; | 1834 | ioc->fcmode = false; |
1349 | ioc->pllinit = false; | 1835 | ioc->pllinit = false; |
1350 | ioc->dbg_fwsave_once = true; | 1836 | ioc->dbg_fwsave_once = true; |
1837 | ioc->iocpf.ioc = ioc; | ||
1351 | 1838 | ||
1352 | bfa_ioc_mbox_attach(ioc); | 1839 | bfa_ioc_mbox_attach(ioc); |
1353 | INIT_LIST_HEAD(&ioc->hb_notify_q); | 1840 | INIT_LIST_HEAD(&ioc->hb_notify_q); |
1354 | 1841 | ||
1355 | bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); | 1842 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); |
1843 | bfa_fsm_send_event(ioc, IOC_E_RESET); | ||
1356 | } | 1844 | } |
1357 | 1845 | ||
1358 | /** | 1846 | /** |
@@ -1657,7 +2145,40 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) | |||
1657 | static enum bfa_ioc_state | 2145 | static enum bfa_ioc_state |
1658 | bfa_ioc_get_state(struct bfa_ioc *ioc) | 2146 | bfa_ioc_get_state(struct bfa_ioc *ioc) |
1659 | { | 2147 | { |
1660 | return bfa_sm_to_state(ioc_sm_table, ioc->fsm); | 2148 | enum bfa_iocpf_state iocpf_st; |
2149 | enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); | ||
2150 | |||
2151 | if (ioc_st == BFA_IOC_ENABLING || | ||
2152 | ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { | ||
2153 | |||
2154 | iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); | ||
2155 | |||
2156 | switch (iocpf_st) { | ||
2157 | case BFA_IOCPF_SEMWAIT: | ||
2158 | ioc_st = BFA_IOC_SEMWAIT; | ||
2159 | break; | ||
2160 | |||
2161 | case BFA_IOCPF_HWINIT: | ||
2162 | ioc_st = BFA_IOC_HWINIT; | ||
2163 | break; | ||
2164 | |||
2165 | case BFA_IOCPF_FWMISMATCH: | ||
2166 | ioc_st = BFA_IOC_FWMISMATCH; | ||
2167 | break; | ||
2168 | |||
2169 | case BFA_IOCPF_FAIL: | ||
2170 | ioc_st = BFA_IOC_FAIL; | ||
2171 | break; | ||
2172 | |||
2173 | case BFA_IOCPF_INITFAIL: | ||
2174 | ioc_st = BFA_IOC_INITFAIL; | ||
2175 | break; | ||
2176 | |||
2177 | default: | ||
2178 | break; | ||
2179 | } | ||
2180 | } | ||
2181 | return ioc_st; | ||
1661 | } | 2182 | } |
1662 | 2183 | ||
1663 | void | 2184 | void |
@@ -1689,28 +2210,7 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc) | |||
1689 | mac_t | 2210 | mac_t |
1690 | bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) | 2211 | bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) |
1691 | { | 2212 | { |
1692 | /* | 2213 | return ioc->attr->mac; |
1693 | * Currently mfg mac is used as FCoE enode mac (not configured by PBC) | ||
1694 | */ | ||
1695 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) | ||
1696 | return bfa_ioc_get_mfg_mac(ioc); | ||
1697 | else | ||
1698 | return ioc->attr->mac; | ||
1699 | } | ||
1700 | |||
1701 | static mac_t | ||
1702 | bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc) | ||
1703 | { | ||
1704 | mac_t m; | ||
1705 | |||
1706 | m = ioc->attr->mfg_mac; | ||
1707 | if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) | ||
1708 | m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); | ||
1709 | else | ||
1710 | bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), | ||
1711 | bfa_ioc_pcifn(ioc)); | ||
1712 | |||
1713 | return m; | ||
1714 | } | 2214 | } |
1715 | 2215 | ||
1716 | /** | 2216 | /** |
@@ -1719,8 +2219,13 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc) | |||
1719 | static void | 2219 | static void |
1720 | bfa_ioc_recover(struct bfa_ioc *ioc) | 2220 | bfa_ioc_recover(struct bfa_ioc *ioc) |
1721 | { | 2221 | { |
1722 | bfa_ioc_stats(ioc, ioc_hbfails); | 2222 | u16 bdf; |
1723 | bfa_fsm_send_event(ioc, IOC_E_HBFAIL); | 2223 | |
2224 | bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 | | ||
2225 | ioc->pcidev.device_id); | ||
2226 | |||
2227 | pr_crit("Firmware heartbeat failure at %d", bdf); | ||
2228 | BUG_ON(1); | ||
1724 | } | 2229 | } |
1725 | 2230 | ||
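The rewritten bfa_ioc_recover() packs slot, function and device id into a 16-bit value for the crash message. For comparison, the conventional PCI BDF layout keeps the bus in bits 15:8, the device in bits 7:3 and the function in bits 2:0; a sketch of that standard packing, independent of the field choice above:

#include <stdint.h>
#include <stdio.h>

/* Conventional PCI BDF packing: bus 15:8, device 7:3, function 2:0. */
static uint16_t pci_bdf(uint8_t bus, uint8_t dev, uint8_t fn)
{
        return (uint16_t)((bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x7));
}

int main(void)
{
        /* e.g. 02:00.1 -> 0x0201 */
        printf("0x%04x\n", pci_bdf(0x02, 0x00, 0x1));
        return 0;
}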
1726 | static void | 2231 | static void |
@@ -1728,5 +2233,61 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc) | |||
1728 | { | 2233 | { |
1729 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) | 2234 | if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) |
1730 | return; | 2235 | return; |
2236 | } | ||
2237 | |||
2238 | /** | ||
2239 | * @dg hal_iocpf_pvt BFA IOC PF private functions | ||
2240 | * @{ | ||
2241 | */ | ||
2242 | |||
2243 | static void | ||
2244 | bfa_iocpf_enable(struct bfa_ioc *ioc) | ||
2245 | { | ||
2246 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); | ||
2247 | } | ||
2248 | |||
2249 | static void | ||
2250 | bfa_iocpf_disable(struct bfa_ioc *ioc) | ||
2251 | { | ||
2252 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); | ||
2253 | } | ||
2254 | |||
2255 | static void | ||
2256 | bfa_iocpf_fail(struct bfa_ioc *ioc) | ||
2257 | { | ||
2258 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); | ||
2259 | } | ||
2260 | |||
2261 | static void | ||
2262 | bfa_iocpf_initfail(struct bfa_ioc *ioc) | ||
2263 | { | ||
2264 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); | ||
2265 | } | ||
2266 | |||
2267 | static void | ||
2268 | bfa_iocpf_getattrfail(struct bfa_ioc *ioc) | ||
2269 | { | ||
2270 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); | ||
2271 | } | ||
2272 | |||
2273 | static void | ||
2274 | bfa_iocpf_stop(struct bfa_ioc *ioc) | ||
2275 | { | ||
2276 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); | ||
2277 | } | ||
1731 | 2278 | ||
2279 | void | ||
2280 | bfa_nw_iocpf_timeout(void *ioc_arg) | ||
2281 | { | ||
2282 | struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; | ||
2283 | |||
2284 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); | ||
2285 | } | ||
2286 | |||
2287 | void | ||
2288 | bfa_nw_iocpf_sem_timeout(void *ioc_arg) | ||
2289 | { | ||
2290 | struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; | ||
2291 | |||
2292 | bfa_ioc_hw_sem_get(ioc); | ||
1732 | } | 2293 | } |
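bfa_nw_iocpf_timeout() and bfa_nw_iocpf_sem_timeout() are timer entry points invoked with an opaque void * argument that they cast back to the bfa_ioc before acting. The shape of that trampoline, reduced to plain C with an illustrative struct:

#include <stdio.h>

struct ioc {
        const char *name;
};

/* Timer frameworks store a void * context; the callback recovers the
 * typed object, as bfa_nw_iocpf_timeout() does with its bfa_ioc. */
static void iocpf_timeout(void *arg)
{
        struct ioc *ioc = (struct ioc *)arg;

        printf("%s: forward IOCPF_E_TIMEOUT\n", ioc->name);
}

int main(void)
{
        struct ioc ioc0 = { "ioc0" };

        iocpf_timeout(&ioc0);   /* stands in for the timer firing */
        return 0;
}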
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h index a73d84ec808c..e4974bc24ef6 100644 --- a/drivers/net/bna/bfa_ioc.h +++ b/drivers/net/bna/bfa_ioc.h | |||
@@ -26,16 +26,7 @@ | |||
26 | #define BFA_IOC_TOV 3000 /* msecs */ | 26 | #define BFA_IOC_TOV 3000 /* msecs */ |
27 | #define BFA_IOC_HWSEM_TOV 500 /* msecs */ | 27 | #define BFA_IOC_HWSEM_TOV 500 /* msecs */ |
28 | #define BFA_IOC_HB_TOV 500 /* msecs */ | 28 | #define BFA_IOC_HB_TOV 500 /* msecs */ |
29 | #define BFA_IOC_HWINIT_MAX 2 | 29 | #define BFA_IOC_HWINIT_MAX 5 |
30 | #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV | ||
31 | |||
32 | /** | ||
33 | * Generic Scatter Gather Element used by driver | ||
34 | */ | ||
35 | struct bfa_sge { | ||
36 | u32 sg_len; | ||
37 | void *sg_addr; | ||
38 | }; | ||
39 | 30 | ||
40 | /** | 31 | /** |
41 | * PCI device information required by IOC | 32 | * PCI device information required by IOC |
@@ -65,19 +56,6 @@ struct bfa_dma { | |||
65 | #define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ | 56 | #define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ |
66 | 57 | ||
67 | /** | 58 | /** |
68 | * @brief BFA dma address assignment macro | ||
69 | */ | ||
70 | #define bfa_dma_addr_set(dma_addr, pa) \ | ||
71 | __bfa_dma_addr_set(&dma_addr, (u64)pa) | ||
72 | |||
73 | static inline void | ||
74 | __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) | ||
75 | { | ||
76 | dma_addr->a32.addr_lo = (u32) pa; | ||
77 | dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa)); | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * @brief BFA dma address assignment macro. (big endian format) | 59 | * @brief BFA dma address assignment macro. (big endian format) |
82 | */ | 60 | */ |
83 | #define bfa_dma_be_addr_set(dma_addr, pa) \ | 61 | #define bfa_dma_be_addr_set(dma_addr, pa) \ |
@@ -105,8 +83,11 @@ struct bfa_ioc_regs { | |||
105 | void __iomem *host_page_num_fn; | 83 | void __iomem *host_page_num_fn; |
106 | void __iomem *heartbeat; | 84 | void __iomem *heartbeat; |
107 | void __iomem *ioc_fwstate; | 85 | void __iomem *ioc_fwstate; |
86 | void __iomem *alt_ioc_fwstate; | ||
108 | void __iomem *ll_halt; | 87 | void __iomem *ll_halt; |
88 | void __iomem *alt_ll_halt; | ||
109 | void __iomem *err_set; | 89 | void __iomem *err_set; |
90 | void __iomem *ioc_fail_sync; | ||
110 | void __iomem *shirq_isr_next; | 91 | void __iomem *shirq_isr_next; |
111 | void __iomem *shirq_msk_next; | 92 | void __iomem *shirq_msk_next; |
112 | void __iomem *smem_page_start; | 93 | void __iomem *smem_page_start; |
@@ -165,16 +146,22 @@ struct bfa_ioc_hbfail_notify { | |||
165 | (__notify)->cbarg = (__cbarg); \ | 146 | (__notify)->cbarg = (__cbarg); \ |
166 | } while (0) | 147 | } while (0) |
167 | 148 | ||
149 | struct bfa_iocpf { | ||
150 | bfa_fsm_t fsm; | ||
151 | struct bfa_ioc *ioc; | ||
152 | u32 retry_count; | ||
153 | bool auto_recover; | ||
154 | }; | ||
155 | |||
168 | struct bfa_ioc { | 156 | struct bfa_ioc { |
169 | bfa_fsm_t fsm; | 157 | bfa_fsm_t fsm; |
170 | struct bfa *bfa; | 158 | struct bfa *bfa; |
171 | struct bfa_pcidev pcidev; | 159 | struct bfa_pcidev pcidev; |
172 | struct bfa_timer_mod *timer_mod; | ||
173 | struct timer_list ioc_timer; | 160 | struct timer_list ioc_timer; |
161 | struct timer_list iocpf_timer; | ||
174 | struct timer_list sem_timer; | 162 | struct timer_list sem_timer; |
175 | struct timer_list hb_timer; | 163 | struct timer_list hb_timer; |
176 | u32 hb_count; | 164 | u32 hb_count; |
177 | u32 retry_count; | ||
178 | struct list_head hb_notify_q; | 165 | struct list_head hb_notify_q; |
179 | void *dbg_fwsave; | 166 | void *dbg_fwsave; |
180 | int dbg_fwsave_len; | 167 | int dbg_fwsave_len; |
@@ -182,7 +169,6 @@ struct bfa_ioc { | |||
182 | enum bfi_mclass ioc_mc; | 169 | enum bfi_mclass ioc_mc; |
183 | struct bfa_ioc_regs ioc_regs; | 170 | struct bfa_ioc_regs ioc_regs; |
184 | struct bfa_ioc_drv_stats stats; | 171 | struct bfa_ioc_drv_stats stats; |
185 | bool auto_recover; | ||
186 | bool fcmode; | 172 | bool fcmode; |
187 | bool ctdev; | 173 | bool ctdev; |
188 | bool cna; | 174 | bool cna; |
@@ -195,6 +181,7 @@ struct bfa_ioc { | |||
195 | struct bfa_ioc_cbfn *cbfn; | 181 | struct bfa_ioc_cbfn *cbfn; |
196 | struct bfa_ioc_mbox_mod mbox_mod; | 182 | struct bfa_ioc_mbox_mod mbox_mod; |
197 | struct bfa_ioc_hwif *ioc_hwif; | 183 | struct bfa_ioc_hwif *ioc_hwif; |
184 | struct bfa_iocpf iocpf; | ||
198 | }; | 185 | }; |
199 | 186 | ||
200 | struct bfa_ioc_hwif { | 187 | struct bfa_ioc_hwif { |
@@ -205,8 +192,12 @@ struct bfa_ioc_hwif { | |||
205 | void (*ioc_map_port) (struct bfa_ioc *ioc); | 192 | void (*ioc_map_port) (struct bfa_ioc *ioc); |
206 | void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, | 193 | void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, |
207 | bool msix); | 194 | bool msix); |
208 | void (*ioc_notify_hbfail) (struct bfa_ioc *ioc); | 195 | void (*ioc_notify_fail) (struct bfa_ioc *ioc); |
209 | void (*ioc_ownership_reset) (struct bfa_ioc *ioc); | 196 | void (*ioc_ownership_reset) (struct bfa_ioc *ioc); |
197 | void (*ioc_sync_join) (struct bfa_ioc *ioc); | ||
198 | void (*ioc_sync_leave) (struct bfa_ioc *ioc); | ||
199 | void (*ioc_sync_ack) (struct bfa_ioc *ioc); | ||
200 | bool (*ioc_sync_complete) (struct bfa_ioc *ioc); | ||
210 | }; | 201 | }; |
211 | 202 | ||
212 | #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) | 203 | #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) |
@@ -271,7 +262,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc); | |||
271 | void bfa_nw_ioc_disable(struct bfa_ioc *ioc); | 262 | void bfa_nw_ioc_disable(struct bfa_ioc *ioc); |
272 | 263 | ||
273 | void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); | 264 | void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); |
274 | |||
275 | void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); | 265 | void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); |
276 | void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, | 266 | void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, |
277 | struct bfa_ioc_hbfail_notify *notify); | 267 | struct bfa_ioc_hbfail_notify *notify); |
@@ -289,7 +279,8 @@ mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc); | |||
289 | */ | 279 | */ |
290 | void bfa_nw_ioc_timeout(void *ioc); | 280 | void bfa_nw_ioc_timeout(void *ioc); |
291 | void bfa_nw_ioc_hb_check(void *ioc); | 281 | void bfa_nw_ioc_hb_check(void *ioc); |
292 | void bfa_nw_ioc_sem_timeout(void *ioc); | 282 | void bfa_nw_iocpf_timeout(void *ioc); |
283 | void bfa_nw_iocpf_sem_timeout(void *ioc); | ||
293 | 284 | ||
294 | /* | 285 | /* |
295 | * F/W Image Size & Chunk | 286 | * F/W Image Size & Chunk |
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c index 121cfd6d48b1..469997c4ffd1 100644 --- a/drivers/net/bna/bfa_ioc_ct.c +++ b/drivers/net/bna/bfa_ioc_ct.c | |||
@@ -22,6 +22,15 @@ | |||
22 | #include "bfi_ctreg.h" | 22 | #include "bfi_ctreg.h" |
23 | #include "bfa_defs.h" | 23 | #include "bfa_defs.h" |
24 | 24 | ||
25 | #define bfa_ioc_ct_sync_pos(__ioc) \ | ||
26 | ((u32) (1 << bfa_ioc_pcifn(__ioc))) | ||
27 | #define BFA_IOC_SYNC_REQD_SH 16 | ||
28 | #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) | ||
29 | #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) | ||
30 | #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) | ||
31 | #define bfa_ioc_ct_sync_reqd_pos(__ioc) \ | ||
32 | (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) | ||
33 | |||
25 | /* | 34 | /* |
26 | * forward declarations | 35 | * forward declarations |
27 | */ | 36 | */ |
@@ -30,8 +39,12 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); | |||
30 | static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); | 39 | static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); |
31 | static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); | 40 | static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); |
32 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); | 41 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); |
33 | static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc); | 42 | static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); |
34 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); | 43 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); |
44 | static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); | ||
45 | static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); | ||
46 | static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); | ||
47 | static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); | ||
35 | static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); | 48 | static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); |
36 | 49 | ||
37 | static struct bfa_ioc_hwif nw_hwif_ct; | 50 | static struct bfa_ioc_hwif nw_hwif_ct; |
@@ -48,8 +61,12 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) | |||
48 | nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; | 61 | nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; |
49 | nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; | 62 | nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; |
50 | nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; | 63 | nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; |
51 | nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; | 64 | nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; |
52 | nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; | 65 | nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; |
66 | nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; | ||
67 | nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; | ||
68 | nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; | ||
69 | nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete; | ||
53 | 70 | ||
54 | ioc->ioc_hwif = &nw_hwif_ct; | 71 | ioc->ioc_hwif = &nw_hwif_ct; |
55 | } | 72 | } |
@@ -86,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) | |||
86 | if (usecnt == 0) { | 103 | if (usecnt == 0) { |
87 | writel(1, ioc->ioc_regs.ioc_usage_reg); | 104 | writel(1, ioc->ioc_regs.ioc_usage_reg); |
88 | bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); | 105 | bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); |
106 | writel(0, ioc->ioc_regs.ioc_fail_sync); | ||
89 | return true; | 107 | return true; |
90 | } | 108 | } |
91 | 109 | ||
@@ -149,12 +167,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) | |||
149 | * Notify other functions on HB failure. | 167 | * Notify other functions on HB failure. |
150 | */ | 168 | */ |
151 | static void | 169 | static void |
152 | bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc) | 170 | bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) |
153 | { | 171 | { |
154 | if (ioc->cna) { | 172 | if (ioc->cna) { |
155 | writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); | 173 | writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); |
174 | writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); | ||
156 | /* Wait for halt to take effect */ | 175 | /* Wait for halt to take effect */ |
157 | readl(ioc->ioc_regs.ll_halt); | 176 | readl(ioc->ioc_regs.ll_halt); |
177 | readl(ioc->ioc_regs.alt_ll_halt); | ||
158 | } else { | 178 | } else { |
159 | writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); | 179 | writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); |
160 | readl(ioc->ioc_regs.err_set); | 180 | readl(ioc->ioc_regs.err_set); |
@@ -206,15 +226,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) | |||
206 | if (ioc->port_id == 0) { | 226 | if (ioc->port_id == 0) { |
207 | ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; | 227 | ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; |
208 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; | 228 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; |
229 | ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; | ||
209 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; | 230 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; |
210 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; | 231 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; |
211 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; | 232 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; |
233 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; | ||
212 | } else { | 234 | } else { |
213 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); | 235 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); |
214 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); | 236 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); |
237 | ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; | ||
215 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; | 238 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; |
216 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; | 239 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; |
217 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; | 240 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; |
241 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; | ||
218 | } | 242 | } |
219 | 243 | ||
220 | /* | 244 | /* |
@@ -232,6 +256,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) | |||
232 | ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); | 256 | ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); |
233 | ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); | 257 | ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); |
234 | ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); | 258 | ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); |
259 | ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); | ||
235 | 260 | ||
236 | /** | 261 | /** |
237 | * sram memory access | 262 | * sram memory access |
@@ -317,6 +342,77 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) | |||
317 | bfa_nw_ioc_hw_sem_release(ioc); | 342 | bfa_nw_ioc_hw_sem_release(ioc); |
318 | } | 343 | } |
319 | 344 | ||
345 | /** | ||
346 | * Synchronized IOC failure processing routines | ||
347 | */ | ||
348 | static void | ||
349 | bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) | ||
350 | { | ||
351 | u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
352 | u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); | ||
353 | |||
354 | writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); | ||
355 | } | ||
356 | |||
357 | static void | ||
358 | bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) | ||
359 | { | ||
360 | u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
361 | u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | | ||
362 | bfa_ioc_ct_sync_pos(ioc); | ||
363 | |||
364 | writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); | ||
365 | } | ||
366 | |||
367 | static void | ||
368 | bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) | ||
369 | { | ||
370 | u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
371 | |||
372 | writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); | ||
373 | } | ||
374 | |||
375 | static bool | ||
376 | bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) | ||
377 | { | ||
378 | u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
379 | u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); | ||
380 | u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); | ||
381 | u32 tmp_ackd; | ||
382 | |||
383 | if (sync_ackd == 0) | ||
384 | return true; | ||
385 | |||
386 | /** | ||
387 | * The check below is to see whether any other PCI fn | ||
388 | * has reinitialized the ASIC (reset sync_ackd bits) | ||
389 | * and failed again while this IOC was waiting for hw | ||
390 | * semaphore (in bfa_iocpf_sm_semwait()). | ||
391 | */ | ||
392 | tmp_ackd = sync_ackd; | ||
393 | if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && | ||
394 | !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) | ||
395 | sync_ackd |= bfa_ioc_ct_sync_pos(ioc); | ||
396 | |||
397 | if (sync_reqd == sync_ackd) { | ||
398 | writel(bfa_ioc_ct_clear_sync_ackd(r32), | ||
399 | ioc->ioc_regs.ioc_fail_sync); | ||
400 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | ||
401 | writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); | ||
402 | return true; | ||
403 | } | ||
404 | |||
405 | /** | ||
406 | * If another PCI fn reinitialized and failed again while | ||
407 | * this IOC was waiting for hw sem, the sync_ackd bit for | ||
408 | * this IOC needs to be set again to allow reinitialization. | ||
409 | */ | ||
410 | if (tmp_ackd != sync_ackd) | ||
411 | writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); | ||
412 | |||
413 | return false; | ||
414 | } | ||
415 | |||
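The ioc_fail_sync scratch register implements the cross-function failure handshake: the upper 16 bits hold one "sync required" bit per PCI function that joined initialization, the lower 16 bits the matching "sync acked" bits, and reinitialization proceeds once the two halves agree. A runnable simulation of the bit protocol with a plain variable in place of the register; the corner case of a peer failing again mid-wait (handled via tmp_ackd above) is left out:

#include <stdint.h>
#include <stdio.h>

#define SYNC_REQD_SH    16
#define sync_pos(fn)    ((uint32_t)1 << (fn))

static uint32_t fail_sync;      /* stands in for the ioc_fail_sync MMIO */

static void sync_join(int fn) { fail_sync |= sync_pos(fn) << SYNC_REQD_SH; }
static void sync_ack(int fn)  { fail_sync |= sync_pos(fn); }

static int sync_complete(void)
{
        uint32_t reqd = fail_sync >> SYNC_REQD_SH;
        uint32_t ackd = fail_sync & 0xffff;

        if (ackd == 0)
                return 1;       /* no failure handshake in progress */
        return reqd == ackd;    /* every joined function has acked */
}

int main(void)
{
        sync_join(0);
        sync_join(1);
        sync_ack(0);
        printf("fn1 pending: complete=%d\n", sync_complete()); /* 0 */
        sync_ack(1);
        printf("all acked:   complete=%d\n", sync_complete()); /* 1 */
        return 0;
}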
320 | static enum bfa_status | 416 | static enum bfa_status |
321 | bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode) | 417 | bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode) |
322 | { | 418 | { |
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h index 404ea351d4a1..5130d7918660 100644 --- a/drivers/net/bna/bfi_ctreg.h +++ b/drivers/net/bna/bfi_ctreg.h | |||
@@ -535,6 +535,7 @@ enum { | |||
535 | #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG | 535 | #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG |
536 | #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG | 536 | #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG |
537 | #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG | 537 | #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG |
538 | #define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG | ||
538 | 539 | ||
539 | #define CPE_DEPTH_Q(__n) \ | 540 | #define CPE_DEPTH_Q(__n) \ |
540 | (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) | 541 | (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) |
@@ -552,22 +553,30 @@ enum { | |||
552 | (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) | 553 | (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) |
553 | #define RME_CI_PTR_Q(__n) \ | 554 | #define RME_CI_PTR_Q(__n) \ |
554 | (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) | 555 | (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) |
555 | #define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \ | 556 | #define HQM_QSET_RXQ_DRBL_P0(__n) \ |
556 | * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) | 557 | (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \ |
557 | #define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \ | 558 | (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) |
558 | * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) | 559 | #define HQM_QSET_TXQ_DRBL_P0(__n) \ |
559 | #define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \ | 560 | (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \ |
560 | * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) | 561 | (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) |
561 | #define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \ | 562 | #define HQM_QSET_IB_DRBL_1_P0(__n) \ |
562 | * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) | 563 | (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \ |
563 | #define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \ | 564 | (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) |
564 | * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) | 565 | #define HQM_QSET_IB_DRBL_2_P0(__n) \ |
565 | #define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \ | 566 | (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \ |
566 | * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) | 567 | (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) |
567 | #define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \ | 568 | #define HQM_QSET_RXQ_DRBL_P1(__n) \ |
568 | * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) | 569 | (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \ |
569 | #define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \ | 570 | (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) |
570 | * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) | 571 | #define HQM_QSET_TXQ_DRBL_P1(__n) \ |
572 | (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \ | ||
573 | (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) | ||
574 | #define HQM_QSET_IB_DRBL_1_P1(__n) \ | ||
575 | (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \ | ||
576 | (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) | ||
577 | #define HQM_QSET_IB_DRBL_2_P1(__n) \ | ||
578 | (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \ | ||
579 | (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) | ||
571 | 580 | ||
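The rewrapped HQM_QSET_* macros, like the CPE/RME ones before them, use a single stride idiom: the address of instance n is base + n * (second - first), with the stride derived from two known register instances. A compile-time illustration; the offsets here are made up, the real ones live elsewhere in this header:

#include <stdio.h>

/* Hypothetical offsets; only the idiom matters. */
#define REG_Q0          0x00010014u
#define REG_Q1          0x00010034u
#define REG_Q(__n)      (REG_Q0 + (__n) * (REG_Q1 - REG_Q0))

int main(void)
{
        /* stride = 0x20, so Q2 sits at 0x10014 + 2 * 0x20 = 0x10054 */
        printf("Q2 at 0x%08x\n", REG_Q(2));
        return 0;
}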
572 | #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) | 581 | #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) |
573 | #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) | 582 | #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) |
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h index df6676bbc84e..a287f89b0289 100644 --- a/drivers/net/bna/bna.h +++ b/drivers/net/bna/bna.h | |||
@@ -32,8 +32,6 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; | |||
32 | /* Log string size */ | 32 | /* Log string size */ |
33 | #define BNA_MESSAGE_SIZE 256 | 33 | #define BNA_MESSAGE_SIZE 256 |
34 | 34 | ||
35 | #define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod)) | ||
36 | |||
37 | /* MBOX API for PORT, TX, RX */ | 35 | /* MBOX API for PORT, TX, RX */ |
38 | #define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \ | 36 | #define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \ |
39 | do { \ | 37 | do { \ |
@@ -390,8 +388,8 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe); | |||
390 | 388 | ||
391 | /* API for RX */ | 389 | /* API for RX */ |
392 | int bna_port_mtu_get(struct bna_port *port); | 390 | int bna_port_mtu_get(struct bna_port *port); |
393 | void bna_llport_admin_up(struct bna_llport *llport); | 391 | void bna_llport_rx_started(struct bna_llport *llport); |
394 | void bna_llport_admin_down(struct bna_llport *llport); | 392 | void bna_llport_rx_stopped(struct bna_llport *llport); |
395 | 393 | ||
396 | /* API for BNAD */ | 394 | /* API for BNAD */ |
397 | void bna_port_enable(struct bna_port *port); | 395 | void bna_port_enable(struct bna_port *port); |
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c index 07b26598546e..e1527472b961 100644 --- a/drivers/net/bna/bna_ctrl.c +++ b/drivers/net/bna/bna_ctrl.c | |||
@@ -59,14 +59,70 @@ bna_port_cb_link_down(struct bna_port *port, int status) | |||
59 | port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN); | 59 | port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN); |
60 | } | 60 | } |
61 | 61 | ||
62 | static inline int | ||
63 | llport_can_be_up(struct bna_llport *llport) | ||
64 | { | ||
65 | int ready = 0; | ||
66 | if (llport->type == BNA_PORT_T_REGULAR) | ||
67 | ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) && | ||
68 | (llport->flags & BNA_LLPORT_F_RX_STARTED) && | ||
69 | (llport->flags & BNA_LLPORT_F_PORT_ENABLED)); | ||
70 | else | ||
71 | ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) && | ||
72 | (llport->flags & BNA_LLPORT_F_RX_STARTED) && | ||
73 | !(llport->flags & BNA_LLPORT_F_PORT_ENABLED)); | ||
74 | return ready; | ||
75 | } | ||
76 | |||
77 | #define llport_is_up llport_can_be_up | ||
78 | |||
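Note that llport_can_be_up() tests BNA_LLPORT_F_PORT_ENABLED with opposite polarity per port type: a regular port needs the physical port enabled, while any other (diagnostic) type requires it disabled. The same truth table restated compactly; the flag values are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum {
        F_ADMIN_UP      = 1 << 0,
        F_RX_STARTED    = 1 << 1,
        F_PORT_ENABLED  = 1 << 2,
};

static bool can_be_up(bool regular, unsigned int flags)
{
        bool base = (flags & F_ADMIN_UP) && (flags & F_RX_STARTED);

        return regular ? (base && (flags & F_PORT_ENABLED))
                       : (base && !(flags & F_PORT_ENABLED));
}

int main(void)
{
        unsigned int f = F_ADMIN_UP | F_RX_STARTED | F_PORT_ENABLED;

        printf("regular, all flags:     %d\n", can_be_up(true, f));    /* 1 */
        printf("loopback, port enabled: %d\n", can_be_up(false, f));   /* 0 */
        return 0;
}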
79 | enum bna_llport_event { | ||
80 | LLPORT_E_START = 1, | ||
81 | LLPORT_E_STOP = 2, | ||
82 | LLPORT_E_FAIL = 3, | ||
83 | LLPORT_E_UP = 4, | ||
84 | LLPORT_E_DOWN = 5, | ||
85 | LLPORT_E_FWRESP_UP_OK = 6, | ||
86 | LLPORT_E_FWRESP_UP_FAIL = 7, | ||
87 | LLPORT_E_FWRESP_DOWN = 8 | ||
88 | }; | ||
89 | |||
90 | static void | ||
91 | bna_llport_cb_port_enabled(struct bna_llport *llport) | ||
92 | { | ||
93 | llport->flags |= BNA_LLPORT_F_PORT_ENABLED; | ||
94 | |||
95 | if (llport_can_be_up(llport)) | ||
96 | bfa_fsm_send_event(llport, LLPORT_E_UP); | ||
97 | } | ||
98 | |||
99 | static void | ||
100 | bna_llport_cb_port_disabled(struct bna_llport *llport) | ||
101 | { | ||
102 | int llport_up = llport_is_up(llport); | ||
103 | |||
104 | llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED; | ||
105 | |||
106 | if (llport_up) | ||
107 | bfa_fsm_send_event(llport, LLPORT_E_DOWN); | ||
108 | } | ||
109 | |||
62 | /** | 110 | /** |
63 | * MBOX | 111 | * MBOX |
64 | */ | 112 | */ |
65 | static int | 113 | static int |
66 | bna_is_aen(u8 msg_id) | 114 | bna_is_aen(u8 msg_id) |
67 | { | 115 | { |
68 | return msg_id == BFI_LL_I2H_LINK_DOWN_AEN || | 116 | switch (msg_id) { |
69 | msg_id == BFI_LL_I2H_LINK_UP_AEN; | 117 | case BFI_LL_I2H_LINK_DOWN_AEN: |
118 | case BFI_LL_I2H_LINK_UP_AEN: | ||
119 | case BFI_LL_I2H_PORT_ENABLE_AEN: | ||
120 | case BFI_LL_I2H_PORT_DISABLE_AEN: | ||
121 | return 1; | ||
122 | |||
123 | default: | ||
124 | return 0; | ||
125 | } | ||
70 | } | 126 | } |
71 | 127 | ||
72 | static void | 128 | static void |
@@ -81,6 +137,12 @@ bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg) | |||
81 | case BFI_LL_I2H_LINK_DOWN_AEN: | 137 | case BFI_LL_I2H_LINK_DOWN_AEN: |
82 | bna_port_cb_link_down(&bna->port, aen->reason); | 138 | bna_port_cb_link_down(&bna->port, aen->reason); |
83 | break; | 139 | break; |
140 | case BFI_LL_I2H_PORT_ENABLE_AEN: | ||
141 | bna_llport_cb_port_enabled(&bna->port.llport); | ||
142 | break; | ||
143 | case BFI_LL_I2H_PORT_DISABLE_AEN: | ||
144 | bna_llport_cb_port_disabled(&bna->port.llport); | ||
145 | break; | ||
84 | default: | 146 | default: |
85 | break; | 147 | break; |
86 | } | 148 | } |
@@ -251,16 +313,6 @@ static void bna_llport_start(struct bna_llport *llport); | |||
251 | static void bna_llport_stop(struct bna_llport *llport); | 313 | static void bna_llport_stop(struct bna_llport *llport); |
252 | static void bna_llport_fail(struct bna_llport *llport); | 314 | static void bna_llport_fail(struct bna_llport *llport); |
253 | 315 | ||
254 | enum bna_llport_event { | ||
255 | LLPORT_E_START = 1, | ||
256 | LLPORT_E_STOP = 2, | ||
257 | LLPORT_E_FAIL = 3, | ||
258 | LLPORT_E_UP = 4, | ||
259 | LLPORT_E_DOWN = 5, | ||
260 | LLPORT_E_FWRESP_UP = 6, | ||
261 | LLPORT_E_FWRESP_DOWN = 7 | ||
262 | }; | ||
263 | |||
264 | enum bna_llport_state { | 316 | enum bna_llport_state { |
265 | BNA_LLPORT_STOPPED = 1, | 317 | BNA_LLPORT_STOPPED = 1, |
266 | BNA_LLPORT_DOWN = 2, | 318 | BNA_LLPORT_DOWN = 2, |
@@ -320,7 +372,7 @@ bna_llport_sm_stopped(struct bna_llport *llport, | |||
320 | /* No-op */ | 372 | /* No-op */ |
321 | break; | 373 | break; |
322 | 374 | ||
323 | case LLPORT_E_FWRESP_UP: | 375 | case LLPORT_E_FWRESP_UP_OK: |
324 | case LLPORT_E_FWRESP_DOWN: | 376 | case LLPORT_E_FWRESP_DOWN: |
325 | /** | 377 | /** |
326 | * These events are received due to flushing of mbox when | 378 | * These events are received due to flushing of mbox when |
@@ -366,6 +418,7 @@ bna_llport_sm_down(struct bna_llport *llport, | |||
366 | static void | 418 | static void |
367 | bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport) | 419 | bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport) |
368 | { | 420 | { |
421 | BUG_ON(!llport_can_be_up(llport)); | ||
369 | /** | 422 | /** |
370 | * NOTE: Do not call bna_fw_llport_up() here. That will overstep | 423 | * NOTE: Do not call bna_fw_llport_up() here. That will overstep |
371 | * mbox due to down_resp_wait -> up_resp_wait transition on event | 424 | * mbox due to down_resp_wait -> up_resp_wait transition on event |
@@ -390,10 +443,14 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport, | |||
390 | bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait); | 443 | bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait); |
391 | break; | 444 | break; |
392 | 445 | ||
393 | case LLPORT_E_FWRESP_UP: | 446 | case LLPORT_E_FWRESP_UP_OK: |
394 | bfa_fsm_set_state(llport, bna_llport_sm_up); | 447 | bfa_fsm_set_state(llport, bna_llport_sm_up); |
395 | break; | 448 | break; |
396 | 449 | ||
450 | case LLPORT_E_FWRESP_UP_FAIL: | ||
451 | bfa_fsm_set_state(llport, bna_llport_sm_down); | ||
452 | break; | ||
453 | |||
397 | case LLPORT_E_FWRESP_DOWN: | 454 | case LLPORT_E_FWRESP_DOWN: |
398 | /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */ | 455 | /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */ |
399 | bna_fw_llport_up(llport); | 456 | bna_fw_llport_up(llport); |
@@ -431,11 +488,12 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport, | |||
431 | bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait); | 488 | bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait); |
432 | break; | 489 | break; |
433 | 490 | ||
434 | case LLPORT_E_FWRESP_UP: | 491 | case LLPORT_E_FWRESP_UP_OK: |
435 | /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */ | 492 | /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */ |
436 | bna_fw_llport_down(llport); | 493 | bna_fw_llport_down(llport); |
437 | break; | 494 | break; |
438 | 495 | ||
496 | case LLPORT_E_FWRESP_UP_FAIL: | ||
439 | case LLPORT_E_FWRESP_DOWN: | 497 | case LLPORT_E_FWRESP_DOWN: |
440 | bfa_fsm_set_state(llport, bna_llport_sm_down); | 498 | bfa_fsm_set_state(llport, bna_llport_sm_down); |
441 | break; | 499 | break; |
@@ -496,11 +554,12 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport, | |||
496 | /* No-op */ | 554 | /* No-op */ |
497 | break; | 555 | break; |
498 | 556 | ||
499 | case LLPORT_E_FWRESP_UP: | 557 | case LLPORT_E_FWRESP_UP_OK: |
500 | /* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */ | 558 | /* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */ |
501 | bna_fw_llport_down(llport); | 559 | bna_fw_llport_down(llport); |
502 | break; | 560 | break; |
503 | 561 | ||
562 | case LLPORT_E_FWRESP_UP_FAIL: | ||
504 | case LLPORT_E_FWRESP_DOWN: | 563 | case LLPORT_E_FWRESP_DOWN: |
505 | bfa_fsm_set_state(llport, bna_llport_sm_stopped); | 564 | bfa_fsm_set_state(llport, bna_llport_sm_stopped); |
506 | break; | 565 | break; |
@@ -541,7 +600,14 @@ bna_fw_cb_llport_up(void *arg, int status) | |||
541 | struct bna_llport *llport = (struct bna_llport *)arg; | 600 | struct bna_llport *llport = (struct bna_llport *)arg; |
542 | 601 | ||
543 | bfa_q_qe_init(&llport->mbox_qe.qe); | 602 | bfa_q_qe_init(&llport->mbox_qe.qe); |
544 | bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP); | 603 | if (status == BFI_LL_CMD_FAIL) { |
604 | if (llport->type == BNA_PORT_T_REGULAR) | ||
605 | llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED; | ||
606 | else | ||
607 | llport->flags &= ~BNA_LLPORT_F_ADMIN_UP; | ||
608 | bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL); | ||
609 | } else | ||
610 | bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK); | ||
545 | } | 611 | } |
546 | 612 | ||
547 | static void | 613 | static void |
@@ -588,13 +654,14 @@ bna_port_cb_llport_stopped(struct bna_port *port, | |||
588 | static void | 654 | static void |
589 | bna_llport_init(struct bna_llport *llport, struct bna *bna) | 655 | bna_llport_init(struct bna_llport *llport, struct bna *bna) |
590 | { | 656 | { |
591 | llport->flags |= BNA_LLPORT_F_ENABLED; | 657 | llport->flags |= BNA_LLPORT_F_ADMIN_UP; |
658 | llport->flags |= BNA_LLPORT_F_PORT_ENABLED; | ||
592 | llport->type = BNA_PORT_T_REGULAR; | 659 | llport->type = BNA_PORT_T_REGULAR; |
593 | llport->bna = bna; | 660 | llport->bna = bna; |
594 | 661 | ||
595 | llport->link_status = BNA_LINK_DOWN; | 662 | llport->link_status = BNA_LINK_DOWN; |
596 | 663 | ||
597 | llport->admin_up_count = 0; | 664 | llport->rx_started_count = 0; |
598 | 665 | ||
599 | llport->stop_cbfn = NULL; | 666 | llport->stop_cbfn = NULL; |
600 | 667 | ||
@@ -606,7 +673,8 @@ bna_llport_init(struct bna_llport *llport, struct bna *bna) | |||
606 | static void | 673 | static void |
607 | bna_llport_uninit(struct bna_llport *llport) | 674 | bna_llport_uninit(struct bna_llport *llport) |
608 | { | 675 | { |
609 | llport->flags &= ~BNA_LLPORT_F_ENABLED; | 676 | llport->flags &= ~BNA_LLPORT_F_ADMIN_UP; |
677 | llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED; | ||
610 | 678 | ||
611 | llport->bna = NULL; | 679 | llport->bna = NULL; |
612 | } | 680 | } |
@@ -628,6 +696,8 @@ bna_llport_stop(struct bna_llport *llport) | |||
628 | static void | 696 | static void |
629 | bna_llport_fail(struct bna_llport *llport) | 697 | bna_llport_fail(struct bna_llport *llport) |
630 | { | 698 | { |
699 | /* Reset the physical port status to enabled */ | ||
700 | llport->flags |= BNA_LLPORT_F_PORT_ENABLED; | ||
631 | bfa_fsm_send_event(llport, LLPORT_E_FAIL); | 701 | bfa_fsm_send_event(llport, LLPORT_E_FAIL); |
632 | } | 702 | } |
633 | 703 | ||
@@ -638,25 +708,31 @@ bna_llport_state_get(struct bna_llport *llport) | |||
638 | } | 708 | } |
639 | 709 | ||
640 | void | 710 | void |
641 | bna_llport_admin_up(struct bna_llport *llport) | 711 | bna_llport_rx_started(struct bna_llport *llport) |
642 | { | 712 | { |
643 | llport->admin_up_count++; | 713 | llport->rx_started_count++; |
644 | 714 | ||
645 | if (llport->admin_up_count == 1) { | 715 | if (llport->rx_started_count == 1) { |
646 | llport->flags |= BNA_LLPORT_F_RX_ENABLED; | 716 | |
647 | if (llport->flags & BNA_LLPORT_F_ENABLED) | 717 | llport->flags |= BNA_LLPORT_F_RX_STARTED; |
718 | |||
719 | if (llport_can_be_up(llport)) | ||
648 | bfa_fsm_send_event(llport, LLPORT_E_UP); | 720 | bfa_fsm_send_event(llport, LLPORT_E_UP); |
649 | } | 721 | } |
650 | } | 722 | } |
651 | 723 | ||
652 | void | 724 | void |
653 | bna_llport_admin_down(struct bna_llport *llport) | 725 | bna_llport_rx_stopped(struct bna_llport *llport) |
654 | { | 726 | { |
655 | llport->admin_up_count--; | 727 | int llport_up = llport_is_up(llport); |
728 | |||
729 | llport->rx_started_count--; | ||
656 | 730 | ||
657 | if (llport->admin_up_count == 0) { | 731 | if (llport->rx_started_count == 0) { |
658 | llport->flags &= ~BNA_LLPORT_F_RX_ENABLED; | 732 | |
659 | if (llport->flags & BNA_LLPORT_F_ENABLED) | 733 | llport->flags &= ~BNA_LLPORT_F_RX_STARTED; |
734 | |||
735 | if (llport_up) | ||
660 | bfa_fsm_send_event(llport, LLPORT_E_DOWN); | 736 | bfa_fsm_send_event(llport, LLPORT_E_DOWN); |
661 | } | 737 | } |
662 | } | 738 | } |
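The rename from admin_up_count to rx_started_count preserves the edge-triggered counting pattern: only the 0 to 1 and 1 to 0 transitions may raise LLPORT_E_UP/LLPORT_E_DOWN, now gated by llport_can_be_up()/llport_is_up(). The counting half, isolated into a sketch:

#include <stdio.h>

/* Edge-triggered refcount: act only on the 0->1 and 1->0 transitions,
 * as bna_llport_rx_started()/bna_llport_rx_stopped() do. */
struct ref {
        int count;
};

static void ref_get(struct ref *r, void (*first)(void))
{
        if (++r->count == 1)
                first();        /* first user: bring the link up */
}

static void ref_put(struct ref *r, void (*last)(void))
{
        if (--r->count == 0)
                last();         /* last user gone: take the link down */
}

static void up(void)   { printf("LLPORT_E_UP\n"); }
static void down(void) { printf("LLPORT_E_DOWN\n"); }

int main(void)
{
        struct ref r = { 0 };

        ref_get(&r, up);        /* prints once */
        ref_get(&r, up);        /* no-op */
        ref_put(&r, down);      /* no-op */
        ref_put(&r, down);      /* prints once */
        return 0;
}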
@@ -2056,37 +2132,6 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status) | |||
2056 | bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); | 2132 | bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); |
2057 | } | 2133 | } |
2058 | 2134 | ||
2059 | static void | ||
2060 | __rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status) | ||
2061 | { | ||
2062 | struct bna_rx_fndb_ram *rx_fndb_ram; | ||
2063 | u32 ctrl_flags; | ||
2064 | int i; | ||
2065 | |||
2066 | rx_fndb_ram = (struct bna_rx_fndb_ram *) | ||
2067 | BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva, | ||
2068 | RX_FNDB_RAM_BASE_OFFSET); | ||
2069 | |||
2070 | for (i = 0; i < BFI_MAX_RXF; i++) { | ||
2071 | if (status == BNA_STATUS_T_ENABLED) { | ||
2072 | if (i == rxf->rxf_id) | ||
2073 | continue; | ||
2074 | |||
2075 | ctrl_flags = | ||
2076 | readl(&rx_fndb_ram[i].control_flags); | ||
2077 | ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE; | ||
2078 | writel(ctrl_flags, | ||
2079 | &rx_fndb_ram[i].control_flags); | ||
2080 | } else { | ||
2081 | ctrl_flags = | ||
2082 | readl(&rx_fndb_ram[i].control_flags); | ||
2083 | ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE; | ||
2084 | writel(ctrl_flags, | ||
2085 | &rx_fndb_ram[i].control_flags); | ||
2086 | } | ||
2087 | } | ||
2088 | } | ||
2089 | |||
2090 | int | 2135 | int |
2091 | rxf_process_packet_filter_ucast(struct bna_rxf *rxf) | 2136 | rxf_process_packet_filter_ucast(struct bna_rxf *rxf) |
2092 | { | 2137 | { |
@@ -2153,46 +2198,6 @@ rxf_process_packet_filter_promisc(struct bna_rxf *rxf) | |||
2153 | } | 2198 | } |
2154 | 2199 | ||
2155 | int | 2200 | int |
2156 | rxf_process_packet_filter_default(struct bna_rxf *rxf) | ||
2157 | { | ||
2158 | struct bna *bna = rxf->rx->bna; | ||
2159 | |||
2160 | /* Enable/disable default mode */ | ||
2161 | if (is_default_enable(rxf->rxmode_pending, | ||
2162 | rxf->rxmode_pending_bitmask)) { | ||
2163 | /* move default configuration from pending -> active */ | ||
2164 | default_inactive(rxf->rxmode_pending, | ||
2165 | rxf->rxmode_pending_bitmask); | ||
2166 | rxf->rxmode_active |= BNA_RXMODE_DEFAULT; | ||
2167 | |||
2168 | /* Disable VLAN filter to allow all VLANs */ | ||
2169 | __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED); | ||
2170 | /* Redirect all other RxF vlan filtering to this one */ | ||
2171 | __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED); | ||
2172 | rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, | ||
2173 | BNA_STATUS_T_ENABLED); | ||
2174 | return 1; | ||
2175 | } else if (is_default_disable(rxf->rxmode_pending, | ||
2176 | rxf->rxmode_pending_bitmask)) { | ||
2177 | /* move default configuration from pending -> active */ | ||
2178 | default_inactive(rxf->rxmode_pending, | ||
2179 | rxf->rxmode_pending_bitmask); | ||
2180 | rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; | ||
2181 | bna->rxf_default_id = BFI_MAX_RXF; | ||
2182 | |||
2183 | /* Revert VLAN filter */ | ||
2184 | __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); | ||
2185 | /* Stop RxF vlan filter table redirection */ | ||
2186 | __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED); | ||
2187 | rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, | ||
2188 | BNA_STATUS_T_DISABLED); | ||
2189 | return 1; | ||
2190 | } | ||
2191 | |||
2192 | return 0; | ||
2193 | } | ||
2194 | |||
2195 | int | ||
2196 | rxf_process_packet_filter_allmulti(struct bna_rxf *rxf) | 2201 | rxf_process_packet_filter_allmulti(struct bna_rxf *rxf) |
2197 | { | 2202 | { |
2198 | /* Enable/disable allmulti mode */ | 2203 | /* Enable/disable allmulti mode */ |
@@ -2289,48 +2294,6 @@ rxf_clear_packet_filter_promisc(struct bna_rxf *rxf) | |||
2289 | } | 2294 | } |
2290 | 2295 | ||
2291 | int | 2296 | int |
2292 | rxf_clear_packet_filter_default(struct bna_rxf *rxf) | ||
2293 | { | ||
2294 | struct bna *bna = rxf->rx->bna; | ||
2295 | |||
2296 | /* 8. Execute pending default mode disable command */ | ||
2297 | if (is_default_disable(rxf->rxmode_pending, | ||
2298 | rxf->rxmode_pending_bitmask)) { | ||
2299 | /* move default configuration from pending -> active */ | ||
2300 | default_inactive(rxf->rxmode_pending, | ||
2301 | rxf->rxmode_pending_bitmask); | ||
2302 | rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; | ||
2303 | bna->rxf_default_id = BFI_MAX_RXF; | ||
2304 | |||
2305 | /* Revert VLAN filter */ | ||
2306 | __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); | ||
2307 | /* Stop RxF vlan filter table redirection */ | ||
2308 | __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED); | ||
2309 | rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, | ||
2310 | BNA_STATUS_T_DISABLED); | ||
2311 | return 1; | ||
2312 | } | ||
2313 | |||
2314 | /* 9. Clear active default mode; move it to pending enable */ | ||
2315 | if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) { | ||
2316 | /* move default configuration from active -> pending */ | ||
2317 | default_enable(rxf->rxmode_pending, | ||
2318 | rxf->rxmode_pending_bitmask); | ||
2319 | rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; | ||
2320 | |||
2321 | /* Revert VLAN filter */ | ||
2322 | __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); | ||
2323 | /* Stop RxF vlan filter table redirection */ | ||
2324 | __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED); | ||
2325 | rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ, | ||
2326 | BNA_STATUS_T_DISABLED); | ||
2327 | return 1; | ||
2328 | } | ||
2329 | |||
2330 | return 0; | ||
2331 | } | ||
2332 | |||
2333 | int | ||
2334 | rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf) | 2297 | rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf) |
2335 | { | 2298 | { |
2336 | /* 10. Execute pending allmulti mode disable command */ | 2299 | /* 10. Execute pending allmulti mode disable command */ |
@@ -2405,28 +2368,6 @@ rxf_reset_packet_filter_promisc(struct bna_rxf *rxf) | |||
2405 | } | 2368 | } |
2406 | 2369 | ||
2407 | void | 2370 | void |
2408 | rxf_reset_packet_filter_default(struct bna_rxf *rxf) | ||
2409 | { | ||
2410 | struct bna *bna = rxf->rx->bna; | ||
2411 | |||
2412 | /* 8. Clear pending default mode disable */ | ||
2413 | if (is_default_disable(rxf->rxmode_pending, | ||
2414 | rxf->rxmode_pending_bitmask)) { | ||
2415 | default_inactive(rxf->rxmode_pending, | ||
2416 | rxf->rxmode_pending_bitmask); | ||
2417 | rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; | ||
2418 | bna->rxf_default_id = BFI_MAX_RXF; | ||
2419 | } | ||
2420 | |||
2421 | /* 9. Move default mode config from active -> pending */ | ||
2422 | if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) { | ||
2423 | default_enable(rxf->rxmode_pending, | ||
2424 | rxf->rxmode_pending_bitmask); | ||
2425 | rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT; | ||
2426 | } | ||
2427 | } | ||
2428 | |||
2429 | void | ||
2430 | rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf) | 2371 | rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf) |
2431 | { | 2372 | { |
2432 | /* 10. Clear pending allmulti mode disable */ | 2373 | /* 10. Clear pending allmulti mode disable */ |
@@ -2523,76 +2464,6 @@ rxf_promisc_disable(struct bna_rxf *rxf) | |||
2523 | * 1 = need h/w change | 2464 | * 1 = need h/w change |
2524 | */ | 2465 | */ |
2525 | static int | 2466 | static int |
2526 | rxf_default_enable(struct bna_rxf *rxf) | ||
2527 | { | ||
2528 | struct bna *bna = rxf->rx->bna; | ||
2529 | int ret = 0; | ||
2530 | |||
2531 | /* There can not be any pending disable command */ | ||
2532 | |||
2533 | /* Do nothing if pending enable or already enabled */ | ||
2534 | if (is_default_enable(rxf->rxmode_pending, | ||
2535 | rxf->rxmode_pending_bitmask) || | ||
2536 | (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) { | ||
2537 | /* Schedule enable */ | ||
2538 | } else { | ||
2539 | /* Default mode should not be active in the system */ | ||
2540 | default_enable(rxf->rxmode_pending, | ||
2541 | rxf->rxmode_pending_bitmask); | ||
2542 | bna->rxf_default_id = rxf->rxf_id; | ||
2543 | ret = 1; | ||
2544 | } | ||
2545 | |||
2546 | return ret; | ||
2547 | } | ||
2548 | |||
2549 | /** | ||
2550 | * Should only be called by bna_rxf_mode_set. | ||
2551 | * Helps deciding if h/w configuration is needed or not. | ||
2552 | * Returns: | ||
2553 | * 0 = no h/w change | ||
2554 | * 1 = need h/w change | ||
2555 | */ | ||
2556 | static int | ||
2557 | rxf_default_disable(struct bna_rxf *rxf) | ||
2558 | { | ||
2559 | struct bna *bna = rxf->rx->bna; | ||
2560 | int ret = 0; | ||
2561 | |||
2562 | /* There can not be any pending disable */ | ||
2563 | |||
2564 | /* Turn off pending enable command , if any */ | ||
2565 | if (is_default_enable(rxf->rxmode_pending, | ||
2566 | rxf->rxmode_pending_bitmask)) { | ||
2567 | /* Promisc mode should not be active */ | ||
2568 | /* system default state should be pending */ | ||
2569 | default_inactive(rxf->rxmode_pending, | ||
2570 | rxf->rxmode_pending_bitmask); | ||
2571 | /* Remove the default state from the system */ | ||
2572 | bna->rxf_default_id = BFI_MAX_RXF; | ||
2573 | |||
2574 | /* Schedule disable */ | ||
2575 | } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) { | ||
2576 | /* Default mode should be active in the system */ | ||
2577 | default_disable(rxf->rxmode_pending, | ||
2578 | rxf->rxmode_pending_bitmask); | ||
2579 | ret = 1; | ||
2580 | |||
2581 | /* Do nothing if already disabled */ | ||
2582 | } else { | ||
2583 | } | ||
2584 | |||
2585 | return ret; | ||
2586 | } | ||
2587 | |||
2588 | /** | ||
2589 | * Should only be called by bna_rxf_mode_set. | ||
2590 | * Helps deciding if h/w configuration is needed or not. | ||
2591 | * Returns: | ||
2592 | * 0 = no h/w change | ||
2593 | * 1 = need h/w change | ||
2594 | */ | ||
2595 | static int | ||
2596 | rxf_allmulti_enable(struct bna_rxf *rxf) | 2467 | rxf_allmulti_enable(struct bna_rxf *rxf) |
2597 | { | 2468 | { |
2598 | int ret = 0; | 2469 | int ret = 0; |
@@ -2654,38 +2525,13 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, | |||
2654 | struct bna_rxf *rxf = &rx->rxf; | 2525 | struct bna_rxf *rxf = &rx->rxf; |
2655 | int need_hw_config = 0; | 2526 | int need_hw_config = 0; |
2656 | 2527 | ||
2657 | /* Error checks */ | 2528 | /* Process the commands */ |
2658 | 2529 | ||
2659 | if (is_promisc_enable(new_mode, bitmask)) { | 2530 | if (is_promisc_enable(new_mode, bitmask)) { |
2660 | /* If promisc mode is already enabled elsewhere in the system */ | 2531 | /* If promisc mode is already enabled elsewhere in the system */ |
2661 | if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) && | 2532 | if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) && |
2662 | (rx->bna->rxf_promisc_id != rxf->rxf_id)) | 2533 | (rx->bna->rxf_promisc_id != rxf->rxf_id)) |
2663 | goto err_return; | 2534 | goto err_return; |
2664 | |||
2665 | /* If default mode is already enabled in the system */ | ||
2666 | if (rx->bna->rxf_default_id != BFI_MAX_RXF) | ||
2667 | goto err_return; | ||
2668 | |||
2669 | /* Trying to enable promiscuous and default mode together */ | ||
2670 | if (is_default_enable(new_mode, bitmask)) | ||
2671 | goto err_return; | ||
2672 | } | ||
2673 | |||
2674 | if (is_default_enable(new_mode, bitmask)) { | ||
2675 | /* If default mode is already enabled elsewhere in the system */ | ||
2676 | if ((rx->bna->rxf_default_id != BFI_MAX_RXF) && | ||
2677 | (rx->bna->rxf_default_id != rxf->rxf_id)) { | ||
2678 | goto err_return; | ||
2679 | } | ||
2680 | |||
2681 | /* If promiscuous mode is already enabled in the system */ | ||
2682 | if (rx->bna->rxf_promisc_id != BFI_MAX_RXF) | ||
2683 | goto err_return; | ||
2684 | } | ||
2685 | |||
2686 | /* Process the commands */ | ||
2687 | |||
2688 | if (is_promisc_enable(new_mode, bitmask)) { | ||
2689 | if (rxf_promisc_enable(rxf)) | 2535 | if (rxf_promisc_enable(rxf)) |
2690 | need_hw_config = 1; | 2536 | need_hw_config = 1; |
2691 | } else if (is_promisc_disable(new_mode, bitmask)) { | 2537 | } else if (is_promisc_disable(new_mode, bitmask)) { |
@@ -2693,14 +2539,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, | |||
2693 | need_hw_config = 1; | 2539 | need_hw_config = 1; |
2694 | } | 2540 | } |
2695 | 2541 | ||
2696 | if (is_default_enable(new_mode, bitmask)) { | ||
2697 | if (rxf_default_enable(rxf)) | ||
2698 | need_hw_config = 1; | ||
2699 | } else if (is_default_disable(new_mode, bitmask)) { | ||
2700 | if (rxf_default_disable(rxf)) | ||
2701 | need_hw_config = 1; | ||
2702 | } | ||
2703 | |||
2704 | if (is_allmulti_enable(new_mode, bitmask)) { | 2542 | if (is_allmulti_enable(new_mode, bitmask)) { |
2705 | if (rxf_allmulti_enable(rxf)) | 2543 | if (rxf_allmulti_enable(rxf)) |
2706 | need_hw_config = 1; | 2544 | need_hw_config = 1; |
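The surviving promisc check above relies on bna->rxf_promisc_id acting as a single-owner token: BFI_MAX_RXF means unowned, and a second Rx function asking for promiscuous mode is refused. A hedged stand-alone sketch of the idea (MAX_RXF and promisc_request are illustrative, not driver symbols):

#include <stdio.h>

#define MAX_RXF 64		/* stand-in for BFI_MAX_RXF ("no owner") */

static int promisc_owner = MAX_RXF;

static int promisc_request(int rxf_id)
{
	/* already owned by another Rx function: refuse */
	if (promisc_owner != MAX_RXF && promisc_owner != rxf_id)
		return -1;
	promisc_owner = rxf_id;
	return 0;
}

int main(void)
{
	printf("rxf0: %d\n", promisc_request(0));	/* 0: granted */
	printf("rxf1: %d\n", promisc_request(1));	/* -1: refused */
	return 0;
}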
@@ -3126,7 +2964,6 @@ bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev, | |||
3126 | 2964 | ||
3127 | bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); | 2965 | bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); |
3128 | 2966 | ||
3129 | bna->rxf_default_id = BFI_MAX_RXF; | ||
3130 | bna->rxf_promisc_id = BFI_MAX_RXF; | 2967 | bna->rxf_promisc_id = BFI_MAX_RXF; |
3131 | 2968 | ||
3132 | /* Mbox q element for posting stat request to f/w */ | 2969 | /* Mbox q element for posting stat request to f/w */ |
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c index ad93fdb0f427..58c7664040dc 100644 --- a/drivers/net/bna/bna_txrx.c +++ b/drivers/net/bna/bna_txrx.c | |||
@@ -1226,8 +1226,7 @@ rxf_process_packet_filter_vlan(struct bna_rxf *rxf) | |||
1226 | /* Apply the VLAN filter */ | 1226 | /* Apply the VLAN filter */ |
1227 | if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) { | 1227 | if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) { |
1228 | rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING; | 1228 | rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING; |
1229 | if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) && | 1229 | if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC)) |
1230 | !(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) | ||
1231 | __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); | 1230 | __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); |
1232 | } | 1231 | } |
1233 | 1232 | ||
@@ -1276,9 +1275,6 @@ rxf_process_packet_filter(struct bna_rxf *rxf) | |||
1276 | if (rxf_process_packet_filter_promisc(rxf)) | 1275 | if (rxf_process_packet_filter_promisc(rxf)) |
1277 | return 1; | 1276 | return 1; |
1278 | 1277 | ||
1279 | if (rxf_process_packet_filter_default(rxf)) | ||
1280 | return 1; | ||
1281 | |||
1282 | if (rxf_process_packet_filter_allmulti(rxf)) | 1278 | if (rxf_process_packet_filter_allmulti(rxf)) |
1283 | return 1; | 1279 | return 1; |
1284 | 1280 | ||
@@ -1340,9 +1336,6 @@ rxf_clear_packet_filter(struct bna_rxf *rxf) | |||
1340 | if (rxf_clear_packet_filter_promisc(rxf)) | 1336 | if (rxf_clear_packet_filter_promisc(rxf)) |
1341 | return 1; | 1337 | return 1; |
1342 | 1338 | ||
1343 | if (rxf_clear_packet_filter_default(rxf)) | ||
1344 | return 1; | ||
1345 | |||
1346 | if (rxf_clear_packet_filter_allmulti(rxf)) | 1339 | if (rxf_clear_packet_filter_allmulti(rxf)) |
1347 | return 1; | 1340 | return 1; |
1348 | 1341 | ||
@@ -1389,8 +1382,6 @@ rxf_reset_packet_filter(struct bna_rxf *rxf) | |||
1389 | 1382 | ||
1390 | rxf_reset_packet_filter_promisc(rxf); | 1383 | rxf_reset_packet_filter_promisc(rxf); |
1391 | 1384 | ||
1392 | rxf_reset_packet_filter_default(rxf); | ||
1393 | |||
1394 | rxf_reset_packet_filter_allmulti(rxf); | 1385 | rxf_reset_packet_filter_allmulti(rxf); |
1395 | } | 1386 | } |
1396 | 1387 | ||
@@ -1441,12 +1432,16 @@ bna_rxf_init(struct bna_rxf *rxf, | |||
1441 | memset(rxf->vlan_filter_table, 0, | 1432 | memset(rxf->vlan_filter_table, 0, |
1442 | (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32))); | 1433 | (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32))); |
1443 | 1434 | ||
1435 | /* Set up VLAN 0 for pure priority-tagged packets */ | ||
1436 | rxf->vlan_filter_table[0] |= 1; | ||
1437 | |||
1444 | bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); | 1438 | bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); |
1445 | } | 1439 | } |
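The new vlan_filter_table[0] |= 1 line admits VLAN ID 0, so pure priority-tagged (802.1p, VID 0) frames are not dropped by the filter. The table is one u32 per 32 VLAN IDs; a small sketch of the indexing, assuming a 4096-entry VLAN space in place of BFI_MAX_VLAN:

#include <stdio.h>
#include <stdint.h>

#define MAX_VLAN 4095		/* illustrative stand-in for BFI_MAX_VLAN */

static uint32_t vlan_table[(MAX_VLAN + 1) / 32];

static void vlan_add(unsigned vid)
{
	vlan_table[vid >> 5] |= 1u << (vid & 31);	/* word, then bit */
}

static int vlan_allowed(unsigned vid)
{
	return (vlan_table[vid >> 5] >> (vid & 31)) & 1;
}

int main(void)
{
	vlan_add(0);		/* admit pure priority-tagged traffic */
	printf("%d %d\n", vlan_allowed(0), vlan_allowed(100));	/* 1 0 */
	return 0;
}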
1446 | 1440 | ||
1447 | static void | 1441 | static void |
1448 | bna_rxf_uninit(struct bna_rxf *rxf) | 1442 | bna_rxf_uninit(struct bna_rxf *rxf) |
1449 | { | 1443 | { |
1444 | struct bna *bna = rxf->rx->bna; | ||
1450 | struct bna_mac *mac; | 1445 | struct bna_mac *mac; |
1451 | 1446 | ||
1452 | bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment); | 1447 | bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment); |
@@ -1473,6 +1468,27 @@ bna_rxf_uninit(struct bna_rxf *rxf) | |||
1473 | bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); | 1468 | bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); |
1474 | } | 1469 | } |
1475 | 1470 | ||
1471 | /* Turn off pending promisc mode */ | ||
1472 | if (is_promisc_enable(rxf->rxmode_pending, | ||
1473 | rxf->rxmode_pending_bitmask)) { | ||
1474 | /* system promisc state should be pending */ | ||
1475 | BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id)); | ||
1476 | promisc_inactive(rxf->rxmode_pending, | ||
1477 | rxf->rxmode_pending_bitmask); | ||
1478 | bna->rxf_promisc_id = BFI_MAX_RXF; | ||
1479 | } | ||
1480 | /* Promisc mode should not be active */ | ||
1481 | BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC); | ||
1482 | |||
1483 | /* Turn off pending all-multi mode */ | ||
1484 | if (is_allmulti_enable(rxf->rxmode_pending, | ||
1485 | rxf->rxmode_pending_bitmask)) { | ||
1486 | allmulti_inactive(rxf->rxmode_pending, | ||
1487 | rxf->rxmode_pending_bitmask); | ||
1488 | } | ||
1489 | /* Allmulti mode should not be active */ | ||
1490 | BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI); | ||
1491 | |||
1476 | rxf->rx = NULL; | 1492 | rxf->rx = NULL; |
1477 | } | 1493 | } |
1478 | 1494 | ||
@@ -1947,7 +1963,7 @@ bna_rx_sm_started_entry(struct bna_rx *rx) | |||
1947 | bna_ib_ack(&rxp->cq.ib->door_bell, 0); | 1963 | bna_ib_ack(&rxp->cq.ib->door_bell, 0); |
1948 | } | 1964 | } |
1949 | 1965 | ||
1950 | bna_llport_admin_up(&rx->bna->port.llport); | 1966 | bna_llport_rx_started(&rx->bna->port.llport); |
1951 | } | 1967 | } |
1952 | 1968 | ||
1953 | void | 1969 | void |
@@ -1955,13 +1971,13 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) | |||
1955 | { | 1971 | { |
1956 | switch (event) { | 1972 | switch (event) { |
1957 | case RX_E_FAIL: | 1973 | case RX_E_FAIL: |
1958 | bna_llport_admin_down(&rx->bna->port.llport); | 1974 | bna_llport_rx_stopped(&rx->bna->port.llport); |
1959 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); | 1975 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); |
1960 | rx_ib_fail(rx); | 1976 | rx_ib_fail(rx); |
1961 | bna_rxf_fail(&rx->rxf); | 1977 | bna_rxf_fail(&rx->rxf); |
1962 | break; | 1978 | break; |
1963 | case RX_E_STOP: | 1979 | case RX_E_STOP: |
1964 | bna_llport_admin_down(&rx->bna->port.llport); | 1980 | bna_llport_rx_stopped(&rx->bna->port.llport); |
1965 | bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); | 1981 | bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); |
1966 | break; | 1982 | break; |
1967 | default: | 1983 | default: |
@@ -3373,7 +3389,7 @@ __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq) | |||
3373 | 3389 | ||
3374 | txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE; | 3390 | txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE; |
3375 | txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) | | 3391 | txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) | |
3376 | (txq->priority & 0x3)); | 3392 | (txq->priority & 0x7)); |
3377 | txq_cfg.wvc_n_cquota_n_rquota = | 3393 | txq_cfg.wvc_n_cquota_n_rquota = |
3378 | ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) | | 3394 | ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) | |
3379 | (BFI_TX_MAX_WRR_QUOTA & 0xfff)); | 3395 | (BFI_TX_MAX_WRR_QUOTA & 0xfff)); |
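The 0x3 -> 0x7 change above widens the TxQ priority mask to the full 3 bits an 802.1p priority needs; with a 2-bit mask, priorities 4-7 silently alias onto 0-3. A stand-alone illustration (pack() is hypothetical, not the driver's helper):

#include <stdio.h>
#include <stdint.h>

static uint32_t pack(uint32_t fid, uint32_t prio, uint32_t prio_mask)
{
	/* mirror the nxt_qid_n_fid_n_pri layout: fid in bits 3..8 */
	return ((fid & 0x3f) << 3) | (prio & prio_mask);
}

int main(void)
{
	/* priority 5 (binary 101) */
	printf("mask 0x3: prio=%u\n", pack(1, 5, 0x3) & 0x7);	/* 1: wrong */
	printf("mask 0x7: prio=%u\n", pack(1, 5, 0x7) & 0x7);	/* 5: right */
	return 0;
}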
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h index 6877310f6ef4..b9c134f7ad31 100644 --- a/drivers/net/bna/bna_types.h +++ b/drivers/net/bna/bna_types.h | |||
@@ -165,8 +165,7 @@ enum bna_rxp_type { | |||
165 | 165 | ||
166 | enum bna_rxmode { | 166 | enum bna_rxmode { |
167 | BNA_RXMODE_PROMISC = 1, | 167 | BNA_RXMODE_PROMISC = 1, |
168 | BNA_RXMODE_DEFAULT = 2, | 168 | BNA_RXMODE_ALLMULTI = 2 |
169 | BNA_RXMODE_ALLMULTI = 4 | ||
170 | }; | 169 | }; |
171 | 170 | ||
172 | enum bna_rx_event { | 171 | enum bna_rx_event { |
@@ -249,8 +248,9 @@ enum bna_link_status { | |||
249 | }; | 248 | }; |
250 | 249 | ||
251 | enum bna_llport_flags { | 250 | enum bna_llport_flags { |
252 | BNA_LLPORT_F_ENABLED = 1, | 251 | BNA_LLPORT_F_ADMIN_UP = 1, |
253 | BNA_LLPORT_F_RX_ENABLED = 2 | 252 | BNA_LLPORT_F_PORT_ENABLED = 2, |
253 | BNA_LLPORT_F_RX_STARTED = 4 | ||
254 | }; | 254 | }; |
255 | 255 | ||
256 | enum bna_port_flags { | 256 | enum bna_port_flags { |
@@ -405,7 +405,7 @@ struct bna_llport { | |||
405 | 405 | ||
406 | enum bna_link_status link_status; | 406 | enum bna_link_status link_status; |
407 | 407 | ||
408 | int admin_up_count; | 408 | int rx_started_count; |
409 | 409 | ||
410 | void (*stop_cbfn)(struct bna_port *, enum bna_cb_status); | 410 | void (*stop_cbfn)(struct bna_port *, enum bna_cb_status); |
411 | 411 | ||
@@ -1117,7 +1117,6 @@ struct bna { | |||
1117 | 1117 | ||
1118 | struct bna_rit_mod rit_mod; | 1118 | struct bna_rit_mod rit_mod; |
1119 | 1119 | ||
1120 | int rxf_default_id; | ||
1121 | int rxf_promisc_id; | 1120 | int rxf_promisc_id; |
1122 | 1121 | ||
1123 | struct bna_mbox_qe mbox_qe; | 1122 | struct bna_mbox_qe mbox_qe; |
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c index 7e839b9cec22..fad912656fe4 100644 --- a/drivers/net/bna/bnad.c +++ b/drivers/net/bna/bnad.c | |||
@@ -70,6 +70,8 @@ do { \ | |||
70 | (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ | 70 | (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ |
71 | } while (0) | 71 | } while (0) |
72 | 72 | ||
73 | #define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */ | ||
74 | |||
73 | /* | 75 | /* |
74 | * Reinitialize completions in CQ, once Rx is taken down | 76 | * Reinitialize completions in CQ, once Rx is taken down |
75 | */ | 77 | */ |
@@ -107,7 +109,7 @@ static void | |||
107 | bnad_free_all_txbufs(struct bnad *bnad, | 109 | bnad_free_all_txbufs(struct bnad *bnad, |
108 | struct bna_tcb *tcb) | 110 | struct bna_tcb *tcb) |
109 | { | 111 | { |
110 | u16 unmap_cons; | 112 | u32 unmap_cons; |
111 | struct bnad_unmap_q *unmap_q = tcb->unmap_q; | 113 | struct bnad_unmap_q *unmap_q = tcb->unmap_q; |
112 | struct bnad_skb_unmap *unmap_array; | 114 | struct bnad_skb_unmap *unmap_array; |
113 | struct sk_buff *skb = NULL; | 115 | struct sk_buff *skb = NULL; |
@@ -130,7 +132,9 @@ bnad_free_all_txbufs(struct bnad *bnad, | |||
130 | PCI_DMA_TODEVICE); | 132 | PCI_DMA_TODEVICE); |
131 | 133 | ||
132 | pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); | 134 | pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); |
133 | unmap_cons++; | 135 | if (++unmap_cons >= unmap_q->q_depth) |
136 | break; | ||
137 | |||
134 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 138 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
135 | pci_unmap_page(bnad->pcidev, | 139 | pci_unmap_page(bnad->pcidev, |
136 | pci_unmap_addr(&unmap_array[unmap_cons], | 140 | pci_unmap_addr(&unmap_array[unmap_cons], |
@@ -139,7 +143,8 @@ bnad_free_all_txbufs(struct bnad *bnad, | |||
139 | PCI_DMA_TODEVICE); | 143 | PCI_DMA_TODEVICE); |
140 | pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, | 144 | pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, |
141 | 0); | 145 | 0); |
142 | unmap_cons++; | 146 | if (++unmap_cons >= unmap_q->q_depth) |
147 | break; | ||
143 | } | 148 | } |
144 | dev_kfree_skb_any(skb); | 149 | dev_kfree_skb_any(skb); |
145 | } | 150 | } |
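The new "++unmap_cons >= q_depth" guards above bound the walk over the unmap array, where one packet occupies a head entry plus one entry per fragment; without them a corrupt descriptor count would run past the end of the array. Roughly, in a stand-alone sketch:

#include <stdio.h>

#define Q_DEPTH 8

int main(void)
{
	int entries_per_pkt[] = { 3, 4, 5 };	/* 12 > Q_DEPTH: bad input */
	unsigned cons = 0;
	int i, j, freed = 0;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < entries_per_pkt[i]; j++) {
			if (cons >= Q_DEPTH)	/* the guard */
				goto out;
			freed++;		/* "unmap" one entry */
			cons++;
		}
	}
out:
	printf("freed %d of 12 entries, stopped at depth %d\n",
	       freed, Q_DEPTH);			/* freed 8 of 12 ... */
	return 0;
}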
@@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad, | |||
167 | /* | 172 | /* |
168 | * Just return if TX is stopped. This check is useful | 173 | * Just return if TX is stopped. This check is useful |
169 | * when bnad_free_txbufs() runs from a tasklet scheduled | 174 | * when bnad_free_txbufs() runs from a tasklet scheduled |
170 | * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit | 175 | * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit |
171 | * but this routine actually runs after the cleanup has been | 176 | * but this routine actually runs after the cleanup has been |
172 | * executed. | 177 | * executed. |
173 | */ | 178 | */ |
174 | if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) | 179 | if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) |
175 | return 0; | 180 | return 0; |
176 | 181 | ||
177 | updated_hw_cons = *(tcb->hw_consumer_index); | 182 | updated_hw_cons = *(tcb->hw_consumer_index); |
@@ -239,7 +244,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr) | |||
239 | { | 244 | { |
240 | struct bnad *bnad = (struct bnad *)bnad_ptr; | 245 | struct bnad *bnad = (struct bnad *)bnad_ptr; |
241 | struct bna_tcb *tcb; | 246 | struct bna_tcb *tcb; |
242 | u32 acked; | 247 | u32 acked = 0; |
243 | int i, j; | 248 | int i, j; |
244 | 249 | ||
245 | for (i = 0; i < bnad->num_tx; i++) { | 250 | for (i = 0; i < bnad->num_tx; i++) { |
@@ -252,10 +257,26 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr) | |||
252 | (!test_and_set_bit(BNAD_TXQ_FREE_SENT, | 257 | (!test_and_set_bit(BNAD_TXQ_FREE_SENT, |
253 | &tcb->flags))) { | 258 | &tcb->flags))) { |
254 | acked = bnad_free_txbufs(bnad, tcb); | 259 | acked = bnad_free_txbufs(bnad, tcb); |
255 | bna_ib_ack(tcb->i_dbell, acked); | 260 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, |
261 | &tcb->flags))) | ||
262 | bna_ib_ack(tcb->i_dbell, acked); | ||
256 | smp_mb__before_clear_bit(); | 263 | smp_mb__before_clear_bit(); |
257 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | 264 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); |
258 | } | 265 | } |
266 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, | ||
267 | &tcb->flags))) | ||
268 | continue; | ||
269 | if (netif_queue_stopped(bnad->netdev)) { | ||
270 | if (acked && netif_carrier_ok(bnad->netdev) && | ||
271 | BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= | ||
272 | BNAD_NETIF_WAKE_THRESHOLD) { | ||
273 | netif_wake_queue(bnad->netdev); | ||
274 | /* TODO: Counters for individual TxQs? */ | ||
276 | BNAD_UPDATE_CTR(bnad, | ||
277 | netif_queue_wakeup); | ||
278 | } | ||
279 | } | ||
259 | } | 280 | } |
260 | } | 281 | } |
261 | } | 282 | } |
@@ -264,7 +285,7 @@ static u32 | |||
264 | bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) | 285 | bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) |
265 | { | 286 | { |
266 | struct net_device *netdev = bnad->netdev; | 287 | struct net_device *netdev = bnad->netdev; |
267 | u32 sent; | 288 | u32 sent = 0; |
268 | 289 | ||
269 | if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) | 290 | if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) |
270 | return 0; | 291 | return 0; |
@@ -275,12 +296,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) | |||
275 | netif_carrier_ok(netdev) && | 296 | netif_carrier_ok(netdev) && |
276 | BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= | 297 | BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= |
277 | BNAD_NETIF_WAKE_THRESHOLD) { | 298 | BNAD_NETIF_WAKE_THRESHOLD) { |
278 | netif_wake_queue(netdev); | 299 | if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { |
279 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | 300 | netif_wake_queue(netdev); |
301 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | ||
302 | } | ||
280 | } | 303 | } |
304 | } | ||
305 | |||
306 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | ||
281 | bna_ib_ack(tcb->i_dbell, sent); | 307 | bna_ib_ack(tcb->i_dbell, sent); |
282 | } else | ||
283 | bna_ib_ack(tcb->i_dbell, 0); | ||
284 | 308 | ||
285 | smp_mb__before_clear_bit(); | 309 | smp_mb__before_clear_bit(); |
286 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | 310 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); |
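BNAD_TXQ_FREE_SENT here works as a try-lock: whichever context wins test_and_set_bit() reclaims completed Tx buffers and everyone else backs off, with the barrier ordering the frees before the bit is released. A user-space analogue using C11 atomics (names are illustrative):

#include <stdio.h>
#include <stdatomic.h>

static atomic_flag free_sent = ATOMIC_FLAG_INIT;

static int try_reclaim(void)
{
	if (atomic_flag_test_and_set(&free_sent))
		return 0;		/* another context owns the reclaim */
	/* ... free completed Tx buffers here ... */
	atomic_flag_clear(&free_sent);	/* release; the kernel pairs this
					 * with smp_mb__before_clear_bit() */
	return 1;
}

int main(void)
{
	printf("reclaimed: %d\n", try_reclaim());	/* reclaimed: 1 */
	return 0;
}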
@@ -313,25 +337,24 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) | |||
313 | } | 337 | } |
314 | 338 | ||
315 | static void | 339 | static void |
316 | bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) | 340 | bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) |
317 | { | 341 | { |
318 | struct bnad_unmap_q *unmap_q; | 342 | struct bnad_unmap_q *unmap_q; |
319 | struct sk_buff *skb; | 343 | struct sk_buff *skb; |
344 | int unmap_cons; | ||
320 | 345 | ||
321 | unmap_q = rcb->unmap_q; | 346 | unmap_q = rcb->unmap_q; |
322 | while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) { | 347 | for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { |
323 | skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; | 348 | skb = unmap_q->unmap_array[unmap_cons].skb; |
324 | BUG_ON(!(skb)); | 349 | if (!skb) |
325 | unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; | 350 | continue; |
351 | unmap_q->unmap_array[unmap_cons].skb = NULL; | ||
326 | pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> | 352 | pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> |
327 | unmap_array[unmap_q->consumer_index], | 353 | unmap_array[unmap_cons], |
328 | dma_addr), rcb->rxq->buffer_size + | 354 | dma_addr), rcb->rxq->buffer_size, |
329 | NET_IP_ALIGN, PCI_DMA_FROMDEVICE); | 355 | PCI_DMA_FROMDEVICE); |
330 | dev_kfree_skb(skb); | 356 | dev_kfree_skb(skb); |
331 | BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); | ||
332 | BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth); | ||
333 | } | 357 | } |
334 | |||
335 | bnad_reset_rcb(bnad, rcb); | 358 | bnad_reset_rcb(bnad, rcb); |
336 | } | 359 | } |
337 | 360 | ||
@@ -385,43 +408,11 @@ finishing: | |||
385 | unmap_q->producer_index = unmap_prod; | 408 | unmap_q->producer_index = unmap_prod; |
386 | rcb->producer_index = unmap_prod; | 409 | rcb->producer_index = unmap_prod; |
387 | smp_mb(); | 410 | smp_mb(); |
388 | bna_rxq_prod_indx_doorbell(rcb); | 411 | if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags))) |
412 | bna_rxq_prod_indx_doorbell(rcb); | ||
389 | } | 413 | } |
390 | } | 414 | } |
391 | 415 | ||
392 | /* | ||
393 | * Locking is required in the enable path | ||
394 | * because it is called from a napi poll | ||
395 | * context, where the bna_lock is not held | ||
396 | * unlike the IRQ context. | ||
397 | */ | ||
398 | static void | ||
399 | bnad_enable_txrx_irqs(struct bnad *bnad) | ||
400 | { | ||
401 | struct bna_tcb *tcb; | ||
402 | struct bna_ccb *ccb; | ||
403 | int i, j; | ||
404 | unsigned long flags; | ||
405 | |||
406 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
407 | for (i = 0; i < bnad->num_tx; i++) { | ||
408 | for (j = 0; j < bnad->num_txq_per_tx; j++) { | ||
409 | tcb = bnad->tx_info[i].tcb[j]; | ||
410 | bna_ib_coalescing_timer_set(tcb->i_dbell, | ||
411 | tcb->txq->ib->ib_config.coalescing_timeo); | ||
412 | bna_ib_ack(tcb->i_dbell, 0); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | for (i = 0; i < bnad->num_rx; i++) { | ||
417 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | ||
418 | ccb = bnad->rx_info[i].rx_ctrl[j].ccb; | ||
419 | bnad_enable_rx_irq_unsafe(ccb); | ||
420 | } | ||
421 | } | ||
422 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
423 | } | ||
424 | |||
425 | static inline void | 416 | static inline void |
426 | bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) | 417 | bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) |
427 | { | 418 | { |
@@ -448,6 +439,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) | |||
448 | u32 qid0 = ccb->rcb[0]->rxq->rxq_id; | 439 | u32 qid0 = ccb->rcb[0]->rxq->rxq_id; |
449 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; | 440 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; |
450 | 441 | ||
442 | if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) | ||
443 | return 0; | ||
444 | |||
451 | prefetch(bnad->netdev); | 445 | prefetch(bnad->netdev); |
452 | BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, | 446 | BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, |
453 | wi_range); | 447 | wi_range); |
@@ -544,12 +538,15 @@ next: | |||
544 | BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); | 538 | BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); |
545 | 539 | ||
546 | if (likely(ccb)) { | 540 | if (likely(ccb)) { |
547 | bna_ib_ack(ccb->i_dbell, packets); | 541 | if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) |
542 | bna_ib_ack(ccb->i_dbell, packets); | ||
548 | bnad_refill_rxq(bnad, ccb->rcb[0]); | 543 | bnad_refill_rxq(bnad, ccb->rcb[0]); |
549 | if (ccb->rcb[1]) | 544 | if (ccb->rcb[1]) |
550 | bnad_refill_rxq(bnad, ccb->rcb[1]); | 545 | bnad_refill_rxq(bnad, ccb->rcb[1]); |
551 | } else | 546 | } else { |
552 | bna_ib_ack(ccb->i_dbell, 0); | 547 | if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) |
548 | bna_ib_ack(ccb->i_dbell, 0); | ||
549 | } | ||
553 | 550 | ||
554 | return packets; | 551 | return packets; |
555 | } | 552 | } |
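This hunk, like several others in the patch, gates every doorbell access on the queue's STARTED bit, so a datapath call that races with teardown falls through instead of poking dead hardware. A hedged sketch of the pattern (ring_doorbell is illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static atomic_bool rxq_started;

static void ring_doorbell(int pkts)
{
	if (!atomic_load(&rxq_started)) {	/* queue being torn down */
		printf("skip doorbell (%d pkts)\n", pkts);
		return;
	}
	printf("ack %d pkts to hardware\n", pkts);
}

int main(void)
{
	atomic_store(&rxq_started, true);	/* bnad_cb_rx_post() side */
	ring_doorbell(4);			/* acked */
	atomic_store(&rxq_started, false);	/* bnad_cb_rx_cleanup() side */
	ring_doorbell(2);			/* skipped */
	return 0;
}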
@@ -557,6 +554,9 @@ next: | |||
557 | static void | 554 | static void |
558 | bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) | 555 | bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) |
559 | { | 556 | { |
557 | if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) | ||
558 | return; | ||
559 | |||
560 | bna_ib_coalescing_timer_set(ccb->i_dbell, 0); | 560 | bna_ib_coalescing_timer_set(ccb->i_dbell, 0); |
561 | bna_ib_ack(ccb->i_dbell, 0); | 561 | bna_ib_ack(ccb->i_dbell, 0); |
562 | } | 562 | } |
@@ -566,7 +566,8 @@ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) | |||
566 | { | 566 | { |
567 | unsigned long flags; | 567 | unsigned long flags; |
568 | 568 | ||
569 | spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */ | 569 | /* Because of polling context */ |
570 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
570 | bnad_enable_rx_irq_unsafe(ccb); | 571 | bnad_enable_rx_irq_unsafe(ccb); |
571 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 572 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
572 | } | 573 | } |
@@ -575,9 +576,11 @@ static void | |||
575 | bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) | 576 | bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) |
576 | { | 577 | { |
577 | struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); | 578 | struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); |
578 | if (likely(napi_schedule_prep((&rx_ctrl->napi)))) { | 579 | struct napi_struct *napi = &rx_ctrl->napi; |
580 | |||
581 | if (likely(napi_schedule_prep(napi))) { | ||
579 | bnad_disable_rx_irq(bnad, ccb); | 582 | bnad_disable_rx_irq(bnad, ccb); |
580 | __napi_schedule((&rx_ctrl->napi)); | 583 | __napi_schedule(napi); |
581 | } | 584 | } |
582 | BNAD_UPDATE_CTR(bnad, netif_rx_schedule); | 585 | BNAD_UPDATE_CTR(bnad, netif_rx_schedule); |
583 | } | 586 | } |
@@ -602,12 +605,11 @@ bnad_msix_mbox_handler(int irq, void *data) | |||
602 | { | 605 | { |
603 | u32 intr_status; | 606 | u32 intr_status; |
604 | unsigned long flags; | 607 | unsigned long flags; |
605 | struct net_device *netdev = data; | 608 | struct bnad *bnad = (struct bnad *)data; |
606 | struct bnad *bnad; | ||
607 | 609 | ||
608 | bnad = netdev_priv(netdev); | 610 | if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) |
611 | return IRQ_HANDLED; | ||
609 | 612 | ||
610 | /* BNA_ISR_GET(bnad); Inc Ref count */ | ||
611 | spin_lock_irqsave(&bnad->bna_lock, flags); | 613 | spin_lock_irqsave(&bnad->bna_lock, flags); |
612 | 614 | ||
613 | bna_intr_status_get(&bnad->bna, intr_status); | 615 | bna_intr_status_get(&bnad->bna, intr_status); |
@@ -617,7 +619,6 @@ bnad_msix_mbox_handler(int irq, void *data) | |||
617 | 619 | ||
618 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 620 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
619 | 621 | ||
620 | /* BNAD_ISR_PUT(bnad); Dec Ref count */ | ||
621 | return IRQ_HANDLED; | 622 | return IRQ_HANDLED; |
622 | } | 623 | } |
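With this change the mailbox MSI-X vector stays requested for the device's lifetime; masking becomes a software bit (BNAD_RF_MBOX_IRQ_DISABLED) tested at the top of the handler, rather than enable_irq()/disable_irq() calls. Sketched stand-alone (mbox_handler is illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static atomic_bool mbox_irq_disabled;

static int mbox_handler(void)
{
	if (atomic_load(&mbox_irq_disabled))
		return 0;		/* masked in software: ignore */
	/* ... read interrupt status, run mailbox processing ... */
	return 1;
}

int main(void)
{
	atomic_store(&mbox_irq_disabled, true);	 /* "disable" = set a bit */
	printf("%d\n", mbox_handler());		 /* 0 */
	atomic_store(&mbox_irq_disabled, false); /* "enable" = clear it */
	printf("%d\n", mbox_handler());		 /* 1 */
	return 0;
}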
623 | 624 | ||
@@ -627,8 +628,7 @@ bnad_isr(int irq, void *data) | |||
627 | int i, j; | 628 | int i, j; |
628 | u32 intr_status; | 629 | u32 intr_status; |
629 | unsigned long flags; | 630 | unsigned long flags; |
630 | struct net_device *netdev = data; | 631 | struct bnad *bnad = (struct bnad *)data; |
631 | struct bnad *bnad = netdev_priv(netdev); | ||
632 | struct bnad_rx_info *rx_info; | 632 | struct bnad_rx_info *rx_info; |
633 | struct bnad_rx_ctrl *rx_ctrl; | 633 | struct bnad_rx_ctrl *rx_ctrl; |
634 | 634 | ||
@@ -642,16 +642,21 @@ bnad_isr(int irq, void *data) | |||
642 | 642 | ||
643 | spin_lock_irqsave(&bnad->bna_lock, flags); | 643 | spin_lock_irqsave(&bnad->bna_lock, flags); |
644 | 644 | ||
645 | if (BNA_IS_MBOX_ERR_INTR(intr_status)) { | 645 | if (BNA_IS_MBOX_ERR_INTR(intr_status)) |
646 | bna_mbox_handler(&bnad->bna, intr_status); | 646 | bna_mbox_handler(&bnad->bna, intr_status); |
647 | if (!BNA_IS_INTX_DATA_INTR(intr_status)) { | 647 | |
648 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
649 | goto done; | ||
650 | } | ||
651 | } | ||
652 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 648 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
653 | 649 | ||
650 | if (!BNA_IS_INTX_DATA_INTR(intr_status)) | ||
651 | return IRQ_HANDLED; | ||
652 | |||
654 | /* Process data interrupts */ | 653 | /* Process data interrupts */ |
654 | /* Tx processing */ | ||
655 | for (i = 0; i < bnad->num_tx; i++) { | ||
656 | for (j = 0; j < bnad->num_txq_per_tx; j++) | ||
657 | bnad_tx(bnad, bnad->tx_info[i].tcb[j]); | ||
658 | } | ||
659 | /* Rx processing */ | ||
655 | for (i = 0; i < bnad->num_rx; i++) { | 660 | for (i = 0; i < bnad->num_rx; i++) { |
656 | rx_info = &bnad->rx_info[i]; | 661 | rx_info = &bnad->rx_info[i]; |
657 | if (!rx_info->rx) | 662 | if (!rx_info->rx) |
@@ -663,7 +668,6 @@ bnad_isr(int irq, void *data) | |||
663 | rx_ctrl->ccb); | 668 | rx_ctrl->ccb); |
664 | } | 669 | } |
665 | } | 670 | } |
666 | done: | ||
667 | return IRQ_HANDLED; | 671 | return IRQ_HANDLED; |
668 | } | 672 | } |
669 | 673 | ||
@@ -674,11 +678,7 @@ done: | |||
674 | static void | 678 | static void |
675 | bnad_enable_mbox_irq(struct bnad *bnad) | 679 | bnad_enable_mbox_irq(struct bnad *bnad) |
676 | { | 680 | { |
677 | int irq = BNAD_GET_MBOX_IRQ(bnad); | 681 | clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); |
678 | |||
679 | if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)) | ||
680 | if (bnad->cfg_flags & BNAD_CF_MSIX) | ||
681 | enable_irq(irq); | ||
682 | 682 | ||
683 | BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); | 683 | BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); |
684 | } | 684 | } |
@@ -690,14 +690,19 @@ bnad_enable_mbox_irq(struct bnad *bnad) | |||
690 | static void | 690 | static void |
691 | bnad_disable_mbox_irq(struct bnad *bnad) | 691 | bnad_disable_mbox_irq(struct bnad *bnad) |
692 | { | 692 | { |
693 | int irq = BNAD_GET_MBOX_IRQ(bnad); | 693 | set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); |
694 | 694 | ||
695 | BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); | ||
696 | } | ||
695 | 697 | ||
696 | if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)) | 698 | static void |
697 | if (bnad->cfg_flags & BNAD_CF_MSIX) | 699 | bnad_set_netdev_perm_addr(struct bnad *bnad) |
698 | disable_irq_nosync(irq); | 700 | { |
701 | struct net_device *netdev = bnad->netdev; | ||
699 | 702 | ||
700 | BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); | 703 | memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); |
704 | if (is_zero_ether_addr(netdev->dev_addr)) | ||
705 | memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); | ||
701 | } | 706 | } |
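bnad_set_netdev_perm_addr() above records the factory MAC as the permanent address but only overwrites dev_addr while it is still all-zero, so an address set by the administrator survives. A small sketch of that fallback (is_zero_mac stands in for is_zero_ether_addr):

#include <stdio.h>
#include <string.h>

static int is_zero_mac(const unsigned char *m)
{
	static const unsigned char zero[6];
	return memcmp(m, zero, 6) == 0;
}

int main(void)
{
	unsigned char perm[6] = { 0x00, 0x05, 0x1e, 0xaa, 0xbb, 0xcc };
	unsigned char dev[6] = { 0 };		/* not configured yet */

	if (is_zero_mac(dev))
		memcpy(dev, perm, 6);		/* adopt the factory MAC */
	printf("%02x:%02x\n", dev[0], dev[5]);	/* 00:cc */
	return 0;
}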
702 | 707 | ||
703 | /* Control Path Handlers */ | 708 | /* Control Path Handlers */ |
@@ -755,11 +760,14 @@ bnad_cb_port_link_status(struct bnad *bnad, | |||
755 | 760 | ||
756 | if (link_up) { | 761 | if (link_up) { |
757 | if (!netif_carrier_ok(bnad->netdev)) { | 762 | if (!netif_carrier_ok(bnad->netdev)) { |
763 | struct bna_tcb *tcb = bnad->tx_info[0].tcb[0]; | ||
764 | if (!tcb) | ||
765 | return; | ||
758 | pr_warn("bna: %s link up\n", | 766 | pr_warn("bna: %s link up\n", |
759 | bnad->netdev->name); | 767 | bnad->netdev->name); |
760 | netif_carrier_on(bnad->netdev); | 768 | netif_carrier_on(bnad->netdev); |
761 | BNAD_UPDATE_CTR(bnad, link_toggle); | 769 | BNAD_UPDATE_CTR(bnad, link_toggle); |
762 | if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) { | 770 | if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { |
763 | /* Force an immediate Transmit Schedule */ | 771 | /* Force an immediate Transmit Schedule */ |
764 | pr_info("bna: %s TX_STARTED\n", | 772 | pr_info("bna: %s TX_STARTED\n", |
765 | bnad->netdev->name); | 773 | bnad->netdev->name); |
@@ -807,6 +815,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) | |||
807 | { | 815 | { |
808 | struct bnad_tx_info *tx_info = | 816 | struct bnad_tx_info *tx_info = |
809 | (struct bnad_tx_info *)tcb->txq->tx->priv; | 817 | (struct bnad_tx_info *)tcb->txq->tx->priv; |
818 | struct bnad_unmap_q *unmap_q = tcb->unmap_q; | ||
819 | |||
820 | while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) | ||
821 | cpu_relax(); | ||
822 | |||
823 | bnad_free_all_txbufs(bnad, tcb); | ||
824 | |||
825 | unmap_q->producer_index = 0; | ||
826 | unmap_q->consumer_index = 0; | ||
827 | |||
828 | smp_mb__before_clear_bit(); | ||
829 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | ||
810 | 830 | ||
811 | tx_info->tcb[tcb->id] = NULL; | 831 | tx_info->tcb[tcb->id] = NULL; |
812 | } | 832 | } |
@@ -822,6 +842,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb) | |||
822 | } | 842 | } |
823 | 843 | ||
824 | static void | 844 | static void |
845 | bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb) | ||
846 | { | ||
847 | bnad_free_all_rxbufs(bnad, rcb); | ||
848 | } | ||
849 | |||
850 | static void | ||
825 | bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) | 851 | bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) |
826 | { | 852 | { |
827 | struct bnad_rx_info *rx_info = | 853 | struct bnad_rx_info *rx_info = |
@@ -849,7 +875,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb) | |||
849 | if (tx_info != &bnad->tx_info[0]) | 875 | if (tx_info != &bnad->tx_info[0]) |
850 | return; | 876 | return; |
851 | 877 | ||
852 | clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags); | 878 | clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); |
853 | netif_stop_queue(bnad->netdev); | 879 | netif_stop_queue(bnad->netdev); |
854 | pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name); | 880 | pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name); |
855 | } | 881 | } |
@@ -857,30 +883,15 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb) | |||
857 | static void | 883 | static void |
858 | bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb) | 884 | bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb) |
859 | { | 885 | { |
860 | if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) | 886 | struct bnad_unmap_q *unmap_q = tcb->unmap_q; |
861 | return; | ||
862 | |||
863 | if (netif_carrier_ok(bnad->netdev)) { | ||
864 | pr_info("bna: %s TX_STARTED\n", bnad->netdev->name); | ||
865 | netif_wake_queue(bnad->netdev); | ||
866 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | ||
867 | } | ||
868 | } | ||
869 | |||
870 | static void | ||
871 | bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb) | ||
872 | { | ||
873 | struct bnad_unmap_q *unmap_q; | ||
874 | 887 | ||
875 | if (!tcb || (!tcb->unmap_q)) | 888 | if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) |
876 | return; | 889 | return; |
877 | 890 | ||
878 | unmap_q = tcb->unmap_q; | 891 | clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags); |
879 | if (!unmap_q->unmap_array) | ||
880 | return; | ||
881 | 892 | ||
882 | if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) | 893 | while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) |
883 | return; | 894 | cpu_relax(); |
884 | 895 | ||
885 | bnad_free_all_txbufs(bnad, tcb); | 896 | bnad_free_all_txbufs(bnad, tcb); |
886 | 897 | ||
@@ -889,21 +900,45 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb) | |||
889 | 900 | ||
890 | smp_mb__before_clear_bit(); | 901 | smp_mb__before_clear_bit(); |
891 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | 902 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); |
903 | |||
904 | /* | ||
905 | * Workaround: if the first device enable fails, we can | ||
906 | * end up with a zero MAC address, so try reading the | ||
907 | * MAC address again here. | ||
908 | */ | ||
909 | if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { | ||
910 | bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr); | ||
911 | bnad_set_netdev_perm_addr(bnad); | ||
912 | } | ||
913 | |||
914 | set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); | ||
915 | |||
916 | if (netif_carrier_ok(bnad->netdev)) { | ||
917 | pr_info("bna: %s TX_STARTED\n", bnad->netdev->name); | ||
918 | netif_wake_queue(bnad->netdev); | ||
919 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | ||
920 | } | ||
921 | } | ||
922 | |||
923 | static void | ||
924 | bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb) | ||
925 | { | ||
926 | /* Delay only once for the whole Tx Path Shutdown */ | ||
927 | if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags)) | ||
928 | mdelay(BNAD_TXRX_SYNC_MDELAY); | ||
892 | } | 929 | } |
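The test_and_set_bit() + mdelay() pair makes the 250 ms Tx/Rx sync delay a once-per-shutdown cost: only the first queue's cleanup sleeps, the rest see the bit already set. An illustrative stand-alone version:

#include <stdio.h>
#include <stdatomic.h>

static atomic_flag shutdown_delayed = ATOMIC_FLAG_INIT;

static void queue_cleanup(int q)
{
	if (!atomic_flag_test_and_set(&shutdown_delayed))
		printf("queue %d: sleep 250 ms for in-flight work\n", q);
	else
		printf("queue %d: delay already paid, skip\n", q);
}

int main(void)
{
	queue_cleanup(0);	/* only this caller sleeps */
	queue_cleanup(1);	/* skips the delay */
	return 0;
}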
893 | 930 | ||
894 | static void | 931 | static void |
895 | bnad_cb_rx_cleanup(struct bnad *bnad, | 932 | bnad_cb_rx_cleanup(struct bnad *bnad, |
896 | struct bna_ccb *ccb) | 933 | struct bna_ccb *ccb) |
897 | { | 934 | { |
898 | bnad_cq_cmpl_init(bnad, ccb); | ||
899 | |||
900 | bnad_free_rxbufs(bnad, ccb->rcb[0]); | ||
901 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); | 935 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); |
902 | 936 | ||
903 | if (ccb->rcb[1]) { | 937 | if (ccb->rcb[1]) |
904 | bnad_free_rxbufs(bnad, ccb->rcb[1]); | ||
905 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); | 938 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); |
906 | } | 939 | |
940 | if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags)) | ||
941 | mdelay(BNAD_TXRX_SYNC_MDELAY); | ||
907 | } | 942 | } |
908 | 943 | ||
909 | static void | 944 | static void |
@@ -911,6 +946,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb) | |||
911 | { | 946 | { |
912 | struct bnad_unmap_q *unmap_q = rcb->unmap_q; | 947 | struct bnad_unmap_q *unmap_q = rcb->unmap_q; |
913 | 948 | ||
949 | clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags); | ||
950 | |||
951 | if (rcb == rcb->cq->ccb->rcb[0]) | ||
952 | bnad_cq_cmpl_init(bnad, rcb->cq->ccb); | ||
953 | |||
954 | bnad_free_all_rxbufs(bnad, rcb); | ||
955 | |||
914 | set_bit(BNAD_RXQ_STARTED, &rcb->flags); | 956 | set_bit(BNAD_RXQ_STARTED, &rcb->flags); |
915 | 957 | ||
916 | /* Now allocate & post buffers for this RCB */ | 958 | /* Now allocate & post buffers for this RCB */ |
@@ -1047,7 +1089,7 @@ bnad_mbox_irq_free(struct bnad *bnad, | |||
1047 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 1089 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1048 | 1090 | ||
1049 | irq = BNAD_GET_MBOX_IRQ(bnad); | 1091 | irq = BNAD_GET_MBOX_IRQ(bnad); |
1050 | free_irq(irq, bnad->netdev); | 1092 | free_irq(irq, bnad); |
1051 | 1093 | ||
1052 | kfree(intr_info->idl); | 1094 | kfree(intr_info->idl); |
1053 | } | 1095 | } |
@@ -1061,7 +1103,7 @@ static int | |||
1061 | bnad_mbox_irq_alloc(struct bnad *bnad, | 1103 | bnad_mbox_irq_alloc(struct bnad *bnad, |
1062 | struct bna_intr_info *intr_info) | 1104 | struct bna_intr_info *intr_info) |
1063 | { | 1105 | { |
1064 | int err; | 1106 | int err = 0; |
1065 | unsigned long flags; | 1107 | unsigned long flags; |
1066 | u32 irq; | 1108 | u32 irq; |
1067 | irq_handler_t irq_handler; | 1109 | irq_handler_t irq_handler; |
@@ -1096,22 +1138,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad, | |||
1096 | */ | 1138 | */ |
1097 | set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); | 1139 | set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); |
1098 | 1140 | ||
1141 | BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); | ||
1142 | |||
1099 | err = request_irq(irq, irq_handler, flags, | 1143 | err = request_irq(irq, irq_handler, flags, |
1100 | bnad->mbox_irq_name, bnad->netdev); | 1144 | bnad->mbox_irq_name, bnad); |
1101 | 1145 | ||
1102 | if (err) { | 1146 | if (err) { |
1103 | kfree(intr_info->idl); | 1147 | kfree(intr_info->idl); |
1104 | intr_info->idl = NULL; | 1148 | intr_info->idl = NULL; |
1105 | return err; | ||
1106 | } | 1149 | } |
1107 | 1150 | ||
1108 | spin_lock_irqsave(&bnad->bna_lock, flags); | 1151 | return err; |
1109 | |||
1110 | if (bnad->cfg_flags & BNAD_CF_MSIX) | ||
1111 | disable_irq_nosync(irq); | ||
1112 | |||
1113 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
1114 | return 0; | ||
1115 | } | 1152 | } |
1116 | 1153 | ||
1117 | static void | 1154 | static void |
@@ -1388,13 +1425,24 @@ bnad_ioc_hb_check(unsigned long data) | |||
1388 | } | 1425 | } |
1389 | 1426 | ||
1390 | static void | 1427 | static void |
1391 | bnad_ioc_sem_timeout(unsigned long data) | 1428 | bnad_iocpf_timeout(unsigned long data) |
1429 | { | ||
1430 | struct bnad *bnad = (struct bnad *)data; | ||
1431 | unsigned long flags; | ||
1432 | |||
1433 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
1434 | bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc); | ||
1435 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
1436 | } | ||
1437 | |||
1438 | static void | ||
1439 | bnad_iocpf_sem_timeout(unsigned long data) | ||
1392 | { | 1440 | { |
1393 | struct bnad *bnad = (struct bnad *)data; | 1441 | struct bnad *bnad = (struct bnad *)data; |
1394 | unsigned long flags; | 1442 | unsigned long flags; |
1395 | 1443 | ||
1396 | spin_lock_irqsave(&bnad->bna_lock, flags); | 1444 | spin_lock_irqsave(&bnad->bna_lock, flags); |
1397 | bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc); | 1445 | bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc); |
1398 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 1446 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1399 | } | 1447 | } |
1400 | 1448 | ||
@@ -1555,62 +1603,19 @@ poll_exit: | |||
1555 | return rcvd; | 1603 | return rcvd; |
1556 | } | 1604 | } |
1557 | 1605 | ||
1558 | static int | ||
1559 | bnad_napi_poll_txrx(struct napi_struct *napi, int budget) | ||
1560 | { | ||
1561 | struct bnad_rx_ctrl *rx_ctrl = | ||
1562 | container_of(napi, struct bnad_rx_ctrl, napi); | ||
1563 | struct bna_ccb *ccb; | ||
1564 | struct bnad *bnad; | ||
1565 | int rcvd = 0; | ||
1566 | int i, j; | ||
1567 | |||
1568 | ccb = rx_ctrl->ccb; | ||
1569 | |||
1570 | bnad = ccb->bnad; | ||
1571 | |||
1572 | if (!netif_carrier_ok(bnad->netdev)) | ||
1573 | goto poll_exit; | ||
1574 | |||
1575 | /* Handle Tx Completions, if any */ | ||
1576 | for (i = 0; i < bnad->num_tx; i++) { | ||
1577 | for (j = 0; j < bnad->num_txq_per_tx; j++) | ||
1578 | bnad_tx(bnad, bnad->tx_info[i].tcb[j]); | ||
1579 | } | ||
1580 | |||
1581 | /* Handle Rx Completions */ | ||
1582 | rcvd = bnad_poll_cq(bnad, ccb, budget); | ||
1583 | if (rcvd == budget) | ||
1584 | return rcvd; | ||
1585 | poll_exit: | ||
1586 | napi_complete((napi)); | ||
1587 | |||
1588 | BNAD_UPDATE_CTR(bnad, netif_rx_complete); | ||
1589 | |||
1590 | bnad_enable_txrx_irqs(bnad); | ||
1591 | return rcvd; | ||
1592 | } | ||
1593 | |||
1594 | static void | 1606 | static void |
1595 | bnad_napi_enable(struct bnad *bnad, u32 rx_id) | 1607 | bnad_napi_enable(struct bnad *bnad, u32 rx_id) |
1596 | { | 1608 | { |
1597 | int (*napi_poll) (struct napi_struct *, int); | ||
1598 | struct bnad_rx_ctrl *rx_ctrl; | 1609 | struct bnad_rx_ctrl *rx_ctrl; |
1599 | int i; | 1610 | int i; |
1600 | unsigned long flags; | ||
1601 | |||
1602 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
1603 | if (bnad->cfg_flags & BNAD_CF_MSIX) | ||
1604 | napi_poll = bnad_napi_poll_rx; | ||
1605 | else | ||
1606 | napi_poll = bnad_napi_poll_txrx; | ||
1607 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
1608 | 1611 | ||
1609 | /* Initialize & enable NAPI */ | 1612 | /* Initialize & enable NAPI */ |
1610 | for (i = 0; i < bnad->num_rxp_per_rx; i++) { | 1613 | for (i = 0; i < bnad->num_rxp_per_rx; i++) { |
1611 | rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; | 1614 | rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; |
1615 | |||
1612 | netif_napi_add(bnad->netdev, &rx_ctrl->napi, | 1616 | netif_napi_add(bnad->netdev, &rx_ctrl->napi, |
1613 | napi_poll, 64); | 1617 | bnad_napi_poll_rx, 64); |
1618 | |||
1614 | napi_enable(&rx_ctrl->napi); | 1619 | napi_enable(&rx_ctrl->napi); |
1615 | } | 1620 | } |
1616 | } | 1621 | } |
@@ -1825,6 +1830,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id) | |||
1825 | 1830 | ||
1826 | /* Initialize the Rx event handlers */ | 1831 | /* Initialize the Rx event handlers */ |
1827 | rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; | 1832 | rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; |
1833 | rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; | ||
1828 | rx_cbfn.rcb_destroy_cbfn = NULL; | ||
1829 | rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; | 1835 | rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; |
1830 | rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; | 1836 | rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; |
@@ -1968,6 +1974,27 @@ bnad_enable_default_bcast(struct bnad *bnad) | |||
1968 | return 0; | 1974 | return 0; |
1969 | } | 1975 | } |
1970 | 1976 | ||
1977 | /* Called with bnad_conf_lock() held */ | ||
1978 | static void | ||
1979 | bnad_restore_vlans(struct bnad *bnad, u32 rx_id) | ||
1980 | { | ||
1981 | u16 vlan_id; | ||
1982 | unsigned long flags; | ||
1983 | |||
1984 | if (!bnad->vlan_grp) | ||
1985 | return; | ||
1986 | |||
1987 | BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1))); | ||
1988 | |||
1989 | for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) { | ||
1990 | if (!vlan_group_get_device(bnad->vlan_grp, vlan_id)) | ||
1991 | continue; | ||
1992 | spin_lock_irqsave(&bnad->bna_lock, flags); | ||
1993 | bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id); | ||
1994 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | ||
1995 | } | ||
1996 | } | ||
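bnad_restore_vlans() replays the stack's VLAN table into the freshly created Rx path after an open. A hedged sketch of the same replay loop; vlan_configured() is a hypothetical stand-in for vlan_group_get_device():

#include <stdio.h>
#include <stdbool.h>

#define N_VID 4096		/* stand-in for VLAN_N_VID */

static bool host_vlans[N_VID];	/* what the stack remembers */

static bool vlan_configured(unsigned vid) { return host_vlans[vid]; }

int main(void)
{
	unsigned vid, restored = 0;

	host_vlans[1] = host_vlans[100] = true;
	for (vid = 0; vid < N_VID; vid++)
		if (vlan_configured(vid))
			restored++;	/* driver would re-add to h/w here */
	printf("restored %u VLANs\n", restored);	/* 2 */
	return 0;
}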
1997 | |||
1971 | /* Statistics utilities */ | 1998 | /* Statistics utilities */ |
1972 | void | 1999 | void |
1973 | bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) | 2000 | bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) |
@@ -2152,16 +2179,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors) | |||
2152 | bnad->num_rxp_per_rx = 1; | 2179 | bnad->num_rxp_per_rx = 1; |
2153 | } | 2180 | } |
2154 | 2181 | ||
2155 | static void | ||
2156 | bnad_set_netdev_perm_addr(struct bnad *bnad) | ||
2157 | { | ||
2158 | struct net_device *netdev = bnad->netdev; | ||
2159 | |||
2160 | memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); | ||
2161 | if (is_zero_ether_addr(netdev->dev_addr)) | ||
2162 | memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); | ||
2163 | } | ||
2164 | |||
2165 | /* Enable / disable device */ | 2182 | /* Enable / disable device */ |
2166 | static void | 2183 | static void |
2167 | bnad_device_disable(struct bnad *bnad) | 2184 | bnad_device_disable(struct bnad *bnad) |
@@ -2353,6 +2370,9 @@ bnad_open(struct net_device *netdev) | |||
2353 | /* Enable broadcast */ | 2370 | /* Enable broadcast */ |
2354 | bnad_enable_default_bcast(bnad); | 2371 | bnad_enable_default_bcast(bnad); |
2355 | 2372 | ||
2373 | /* Restore VLANs, if any */ | ||
2374 | bnad_restore_vlans(bnad, 0); | ||
2375 | |||
2356 | /* Set the UCAST address */ | 2376 | /* Set the UCAST address */ |
2357 | spin_lock_irqsave(&bnad->bna_lock, flags); | 2377 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2358 | bnad_mac_addr_set_locked(bnad, netdev->dev_addr); | 2378 | bnad_mac_addr_set_locked(bnad, netdev->dev_addr); |
@@ -2433,21 +2453,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2433 | return NETDEV_TX_OK; | 2453 | return NETDEV_TX_OK; |
2434 | } | 2454 | } |
2435 | 2455 | ||
2456 | tx_id = 0; | ||
2457 | |||
2458 | tx_info = &bnad->tx_info[tx_id]; | ||
2459 | tcb = tx_info->tcb[tx_id]; | ||
2460 | unmap_q = tcb->unmap_q; | ||
2461 | |||
2436 | /* | 2462 | /* |
2437 | * Takes care of the Tx that is scheduled between clearing the flag | 2463 | * Takes care of the Tx that is scheduled between clearing the flag |
2438 | * and the netif_stop_queue() call. | 2464 | * and the netif_stop_queue() call. |
2439 | */ | 2465 | */ |
2440 | if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) { | 2466 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { |
2441 | dev_kfree_skb(skb); | 2467 | dev_kfree_skb(skb); |
2442 | return NETDEV_TX_OK; | 2468 | return NETDEV_TX_OK; |
2443 | } | 2469 | } |
2444 | 2470 | ||
2445 | tx_id = 0; | ||
2446 | |||
2447 | tx_info = &bnad->tx_info[tx_id]; | ||
2448 | tcb = tx_info->tcb[tx_id]; | ||
2449 | unmap_q = tcb->unmap_q; | ||
2450 | |||
2451 | vectors = 1 + skb_shinfo(skb)->nr_frags; | 2471 | vectors = 1 + skb_shinfo(skb)->nr_frags; |
2452 | if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) { | 2472 | if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) { |
2453 | dev_kfree_skb(skb); | 2473 | dev_kfree_skb(skb); |
@@ -2462,7 +2482,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2462 | tcb->consumer_index && | 2482 | tcb->consumer_index && |
2463 | !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { | 2483 | !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { |
2464 | acked = bnad_free_txbufs(bnad, tcb); | 2484 | acked = bnad_free_txbufs(bnad, tcb); |
2465 | bna_ib_ack(tcb->i_dbell, acked); | 2485 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) |
2486 | bna_ib_ack(tcb->i_dbell, acked); | ||
2466 | smp_mb__before_clear_bit(); | 2487 | smp_mb__before_clear_bit(); |
2467 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | 2488 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); |
2468 | } else { | 2489 | } else { |
@@ -2624,6 +2645,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
2624 | tcb->producer_index = txq_prod; | 2645 | tcb->producer_index = txq_prod; |
2625 | 2646 | ||
2626 | smp_mb(); | 2647 | smp_mb(); |
2648 | |||
2649 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | ||
2650 | return NETDEV_TX_OK; | ||
2651 | |||
2627 | bna_txq_prod_indx_doorbell(tcb); | 2652 | bna_txq_prod_indx_doorbell(tcb); |
2628 | 2653 | ||
2629 | if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) | 2654 | if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) |
@@ -3032,7 +3057,7 @@ static int __devinit | |||
3032 | bnad_pci_probe(struct pci_dev *pdev, | 3057 | bnad_pci_probe(struct pci_dev *pdev, |
3033 | const struct pci_device_id *pcidev_id) | 3058 | const struct pci_device_id *pcidev_id) |
3034 | { | 3059 | { |
3035 | bool using_dac; | 3060 | bool using_dac = false; |
3036 | int err; | 3061 | int err; |
3037 | struct bnad *bnad; | 3062 | struct bnad *bnad; |
3038 | struct bna *bna; | 3063 | struct bna *bna; |
@@ -3066,7 +3091,7 @@ bnad_pci_probe(struct pci_dev *pdev, | |||
3066 | /* | 3091 | /* |
3067 | * PCI initialization | 3092 | * PCI initialization |
3068 | * Output : using_dac = 1 for 64 bit DMA | 3093 | * Output : using_dac = 1 for 64 bit DMA |
3069 | * = 0 for 32 bit DMA | 3094 | * = 0 for 32 bit DMA |
3070 | */ | 3095 | */ |
3071 | err = bnad_pci_init(bnad, pdev, &using_dac); | 3096 | err = bnad_pci_init(bnad, pdev, &using_dac); |
3072 | if (err) | 3097 | if (err) |
@@ -3084,6 +3109,9 @@ bnad_pci_probe(struct pci_dev *pdev, | |||
3084 | /* Initialize netdev structure, set up ethtool ops */ | 3109 | /* Initialize netdev structure, set up ethtool ops */ |
3085 | bnad_netdev_init(bnad, using_dac); | 3110 | bnad_netdev_init(bnad, using_dac); |
3086 | 3111 | ||
3112 | /* Set link to down state */ | ||
3113 | netif_carrier_off(netdev); | ||
3114 | |||
3087 | bnad_enable_msix(bnad); | 3115 | bnad_enable_msix(bnad); |
3088 | 3116 | ||
3089 | /* Get resource requirement from bna */ | 3117 |
@@ -3115,11 +3143,13 @@ bnad_pci_probe(struct pci_dev *pdev, | |||
3115 | ((unsigned long)bnad)); | 3143 | ((unsigned long)bnad)); |
3116 | setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check, | 3144 | setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check, |
3117 | ((unsigned long)bnad)); | 3145 | ((unsigned long)bnad)); |
3118 | setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout, | 3146 | setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout, |
3147 | ((unsigned long)bnad)); | ||
3148 | setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout, | ||
3119 | ((unsigned long)bnad)); | 3149 | ((unsigned long)bnad)); |
3120 | 3150 | ||
3121 | /* Now start the timer before calling IOC */ | 3151 | /* Now start the timer before calling IOC */ |
3122 | mod_timer(&bnad->bna.device.ioc.ioc_timer, | 3152 | mod_timer(&bnad->bna.device.ioc.iocpf_timer, |
3123 | jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); | 3153 | jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); |
3124 | 3154 | ||
3125 | /* | 3155 | /* |
@@ -3137,11 +3167,6 @@ bnad_pci_probe(struct pci_dev *pdev, | |||
3137 | 3167 | ||
3138 | mutex_unlock(&bnad->conf_mutex); | 3168 | mutex_unlock(&bnad->conf_mutex); |
3139 | 3169 | ||
3140 | /* | ||
3141 | * Make sure the link appears down to the stack | ||
3142 | */ | ||
3143 | netif_carrier_off(netdev); | ||
3144 | |||
3145 | /* Finally, register with net_device layer */ | 3170 |
3146 | err = register_netdev(netdev); | 3171 | err = register_netdev(netdev); |
3147 | if (err) { | 3172 | if (err) { |
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h index ebc3a9078642..8b1d51557def 100644 --- a/drivers/net/bna/bnad.h +++ b/drivers/net/bna/bnad.h | |||
@@ -51,6 +51,7 @@ | |||
51 | */ | 51 | */ |
52 | struct bnad_rx_ctrl { | 52 | struct bnad_rx_ctrl { |
53 | struct bna_ccb *ccb; | 53 | struct bna_ccb *ccb; |
54 | unsigned long flags; | ||
54 | struct napi_struct napi; | 55 | struct napi_struct napi; |
55 | }; | 56 | }; |
56 | 57 | ||
@@ -64,7 +65,7 @@ struct bnad_rx_ctrl { | |||
64 | #define BNAD_NAME "bna" | 65 | #define BNAD_NAME "bna" |
65 | #define BNAD_NAME_LEN 64 | 66 | #define BNAD_NAME_LEN 64 |
66 | 67 | ||
67 | #define BNAD_VERSION "2.3.2.0" | 68 | #define BNAD_VERSION "2.3.2.3" |
68 | 69 | ||
69 | #define BNAD_MAILBOX_MSIX_VECTORS 1 | 70 | #define BNAD_MAILBOX_MSIX_VECTORS 1 |
70 | 71 | ||
@@ -82,6 +83,7 @@ struct bnad_rx_ctrl { | |||
82 | 83 | ||
83 | /* Bit positions for tcb->flags */ | 84 | /* Bit positions for tcb->flags */ |
84 | #define BNAD_TXQ_FREE_SENT 0 | 85 | #define BNAD_TXQ_FREE_SENT 0 |
86 | #define BNAD_TXQ_TX_STARTED 1 | ||
85 | 87 | ||
86 | /* Bit positions for rcb->flags */ | 88 | /* Bit positions for rcb->flags */ |
87 | #define BNAD_RXQ_REFILL 0 | 89 | #define BNAD_RXQ_REFILL 0 |
@@ -124,6 +126,7 @@ struct bnad_completion { | |||
124 | struct bnad_drv_stats { | 126 | struct bnad_drv_stats { |
125 | u64 netif_queue_stop; | 127 | u64 netif_queue_stop; |
126 | u64 netif_queue_wakeup; | 128 | u64 netif_queue_wakeup; |
129 | u64 netif_queue_stopped; | ||
127 | u64 tso4; | 130 | u64 tso4; |
128 | u64 tso6; | 131 | u64 tso6; |
129 | u64 tso_err; | 132 | u64 tso_err; |
@@ -199,12 +202,12 @@ struct bnad_unmap_q { | |||
199 | /* Set, tested & cleared using xxx_bit() functions */ | 202 | /* Set, tested & cleared using xxx_bit() functions */ |
200 | /* Values indicated bit positions */ | 203 | /* Values indicated bit positions */ |
201 | #define BNAD_RF_CEE_RUNNING 1 | 204 | #define BNAD_RF_CEE_RUNNING 1 |
202 | #define BNAD_RF_HW_ERROR 2 | 205 | #define BNAD_RF_MBOX_IRQ_DISABLED 2 |
203 | #define BNAD_RF_MBOX_IRQ_DISABLED 3 | 206 | #define BNAD_RF_RX_STARTED 3 |
204 | #define BNAD_RF_TX_STARTED 4 | 207 | #define BNAD_RF_DIM_TIMER_RUNNING 4 |
205 | #define BNAD_RF_RX_STARTED 5 | 208 | #define BNAD_RF_STATS_TIMER_RUNNING 5 |
206 | #define BNAD_RF_DIM_TIMER_RUNNING 6 | 209 | #define BNAD_RF_TX_SHUTDOWN_DELAYED 6 |
207 | #define BNAD_RF_STATS_TIMER_RUNNING 7 | 210 | #define BNAD_RF_RX_SHUTDOWN_DELAYED 7 |
208 | 211 | ||
209 | struct bnad { | 212 | struct bnad { |
210 | struct net_device *netdev; | 213 | struct net_device *netdev; |
@@ -306,8 +309,10 @@ extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id); | |||
306 | extern void bnad_dim_timer_start(struct bnad *bnad); | 309 | extern void bnad_dim_timer_start(struct bnad *bnad); |
307 | 310 | ||
308 | /* Statistics */ | 311 | /* Statistics */ |
309 | extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); | 312 | extern void bnad_netdev_qstats_fill(struct bnad *bnad, |
310 | extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); | 313 | struct rtnl_link_stats64 *stats); |
314 | extern void bnad_netdev_hwstats_fill(struct bnad *bnad, | ||
315 | struct rtnl_link_stats64 *stats); | ||
311 | 316 | ||
312 | /** | 317 | /** |
313 | * MACROS | 318 | * MACROS |
@@ -320,9 +325,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 | |||
320 | 325 | ||
321 | #define bnad_enable_rx_irq_unsafe(_ccb) \ | 326 | #define bnad_enable_rx_irq_unsafe(_ccb) \ |
322 | { \ | 327 | { \ |
323 | bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ | 328 | if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\ |
324 | (_ccb)->rx_coalescing_timeo); \ | 329 | bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ |
325 | bna_ib_ack((_ccb)->i_dbell, 0); \ | 330 | (_ccb)->rx_coalescing_timeo); \ |
331 | bna_ib_ack((_ccb)->i_dbell, 0); \ | ||
332 | } \ | ||
326 | } | 333 | } |
327 | 334 | ||
328 | #define bnad_dim_timer_running(_bnad) \ | 335 | #define bnad_dim_timer_running(_bnad) \ |
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c index 11fa2ea842c1..142d6047da27 100644 --- a/drivers/net/bna/bnad_ethtool.c +++ b/drivers/net/bna/bnad_ethtool.c | |||
@@ -68,6 +68,7 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { | |||
68 | 68 | ||
69 | "netif_queue_stop", | 69 | "netif_queue_stop", |
70 | "netif_queue_wakeup", | 70 | "netif_queue_wakeup", |
71 | "netif_queue_stopped", | ||
71 | "tso4", | 72 | "tso4", |
72 | "tso6", | 73 | "tso6", |
73 | "tso_err", | 74 | "tso_err", |
@@ -274,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |||
274 | 275 | ||
275 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); | 276 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); |
276 | if (ioc_attr) { | 277 | if (ioc_attr) { |
277 | memset(ioc_attr, 0, sizeof(*ioc_attr)); | ||
278 | spin_lock_irqsave(&bnad->bna_lock, flags); | 278 | spin_lock_irqsave(&bnad->bna_lock, flags); |
279 | bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); | 279 | bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); |
280 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 280 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
@@ -330,10 +330,6 @@ do { \ | |||
330 | 330 | ||
331 | BNAD_GET_REG(PCIE_MISC_REG); | 331 | BNAD_GET_REG(PCIE_MISC_REG); |
332 | 332 | ||
333 | BNAD_GET_REG(HOST_SEM0_REG); | ||
334 | BNAD_GET_REG(HOST_SEM1_REG); | ||
335 | BNAD_GET_REG(HOST_SEM2_REG); | ||
336 | BNAD_GET_REG(HOST_SEM3_REG); | ||
337 | BNAD_GET_REG(HOST_SEM0_INFO_REG); | 333 | BNAD_GET_REG(HOST_SEM0_INFO_REG); |
338 | BNAD_GET_REG(HOST_SEM1_INFO_REG); | 334 | BNAD_GET_REG(HOST_SEM1_INFO_REG); |
339 | BNAD_GET_REG(HOST_SEM2_INFO_REG); | 335 | BNAD_GET_REG(HOST_SEM2_INFO_REG); |
@@ -1184,6 +1180,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, | |||
1184 | 1180 | ||
1185 | bi = sizeof(*net_stats64) / sizeof(u64); | 1181 | bi = sizeof(*net_stats64) / sizeof(u64); |
1186 | 1182 | ||
1183 | /* Get netif_queue_stopped from stack */ | ||
1184 | bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); | ||
1185 | |||
1187 | /* Fill driver stats into ethtool buffers */ | 1186 | /* Fill driver stats into ethtool buffers */ |
1188 | stats64 = (u64 *)&bnad->stats.drv_stats; | 1187 | stats64 = (u64 *)&bnad->stats.drv_stats; |
1189 | for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) | 1188 | for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) |