author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750
tree      644b88f8a71896307d71438e9b3af49126ffb22b    /drivers/scsi/bfa/bfa_intr.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5
Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'drivers/scsi/bfa/bfa_intr.c')
-rw-r--r--    drivers/scsi/bfa/bfa_intr.c    113
1 file changed, 82 insertions(+), 31 deletions(-)
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
index 0ca125712a04..0eba3f930d5b 100644
--- a/drivers/scsi/bfa/bfa_intr.c
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 #include <bfa.h>
-#include <bfi/bfi_cbreg.h>
+#include <bfi/bfi_ctreg.h>
 #include <bfa_port_priv.h>
 #include <bfa_intr_priv.h>
 #include <cs/bfa_debug.h>
@@ -34,6 +34,26 @@ bfa_msix_lpu(struct bfa_s *bfa)
 	bfa_ioc_mbox_isr(&bfa->ioc);
 }
 
+static void
+bfa_reqq_resume(struct bfa_s *bfa, int qid)
+{
+	struct list_head *waitq, *qe, *qen;
+	struct bfa_reqq_wait_s *wqe;
+
+	waitq = bfa_reqq(bfa, qid);
+	list_for_each_safe(qe, qen, waitq) {
+		/**
+		 * Callback only as long as there is room in request queue
+		 */
+		if (bfa_reqq_full(bfa, qid))
+			break;
+
+		list_del(qe);
+		wqe = (struct bfa_reqq_wait_s *) qe;
+		wqe->qresume(wqe->cbarg);
+	}
+}
+
 void
 bfa_msix_all(struct bfa_s *bfa, int vec)
 {
@@ -59,7 +79,7 @@ bfa_intx(struct bfa_s *bfa)
 	qintr = intr & __HFN_INT_RME_MASK;
 	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
 
-	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue ++) {
+	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
 		if (intr & (__HFN_INT_RME_Q0 << queue))
 			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
 	}
@@ -96,7 +116,8 @@ bfa_isr_enable(struct bfa_s *bfa)
 
 	bfa_msix_install(bfa);
 	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
+		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
+		       __HFN_INT_LL_HALT);
 
 	if (pci_func == 0)
 		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
@@ -127,23 +148,18 @@ bfa_isr_disable(struct bfa_s *bfa)
127void 148void
128bfa_msix_reqq(struct bfa_s *bfa, int qid) 149bfa_msix_reqq(struct bfa_s *bfa, int qid)
129{ 150{
130 struct list_head *waitq, *qe, *qen; 151 struct list_head *waitq;
131 struct bfa_reqq_wait_s *wqe;
132 152
133 qid &= (BFI_IOC_MAX_CQS - 1); 153 qid &= (BFI_IOC_MAX_CQS - 1);
134 154
135 waitq = bfa_reqq(bfa, qid); 155 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
136 list_for_each_safe(qe, qen, waitq) {
137 /**
138 * Callback only as long as there is room in request queue
139 */
140 if (bfa_reqq_full(bfa, qid))
141 break;
142 156
143 list_del(qe); 157 /**
144 wqe = (struct bfa_reqq_wait_s *) qe; 158 * Resume any pending requests in the corresponding reqq.
145 wqe->qresume(wqe->cbarg); 159 */
146 } 160 waitq = bfa_reqq(bfa, qid);
161 if (!list_empty(waitq))
162 bfa_reqq_resume(bfa, qid);
147} 163}
148 164
149void 165void
@@ -157,26 +173,27 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 }
 
 void
-bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
+bfa_msix_rspq(struct bfa_s *bfa, int qid)
 {
 	struct bfi_msg_s *m;
 	u32 pi, ci;
+	struct list_head *waitq;
 
-	bfa_trc_fp(bfa, rsp_qid);
+	bfa_trc_fp(bfa, qid);
 
-	rsp_qid &= (BFI_IOC_MAX_CQS - 1);
+	qid &= (BFI_IOC_MAX_CQS - 1);
 
-	bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);
+	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
 
-	ci = bfa_rspq_ci(bfa, rsp_qid);
-	pi = bfa_rspq_pi(bfa, rsp_qid);
+	ci = bfa_rspq_ci(bfa, qid);
+	pi = bfa_rspq_pi(bfa, qid);
 
 	bfa_trc_fp(bfa, ci);
 	bfa_trc_fp(bfa, pi);
 
 	if (bfa->rme_process) {
 		while (ci != pi) {
-			m = bfa_rspq_elem(bfa, rsp_qid, ci);
+			m = bfa_rspq_elem(bfa, qid, ci);
 			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
 
 			bfa_isrs[m->mhdr.msg_class] (bfa, m);
@@ -188,25 +205,59 @@ bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
 	/**
 	 * update CI
 	 */
-	bfa_rspq_ci(bfa, rsp_qid) = pi;
-	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
+	bfa_rspq_ci(bfa, qid) = pi;
+	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
 	bfa_os_mmiowb();
+
+	/**
+	 * Resume any pending requests in the corresponding reqq.
+	 */
+	waitq = bfa_reqq(bfa, qid);
+	if (!list_empty(waitq))
+		bfa_reqq_resume(bfa, qid);
 }
 
 void
 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 {
-	u32 intr;
+	u32 intr, curr_value;
 
 	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
 
 	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
 		bfa_msix_lpu(bfa);
 
-	if (intr & (__HFN_INT_ERR_EMC |
-		    __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 |
-		    __HFN_INT_ERR_PSS))
+	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
+		__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
+
+	if (intr) {
+		if (intr & __HFN_INT_LL_HALT) {
+			/**
+			 * If LL_HALT bit is set then FW Init Halt LL Port
+			 * Register needs to be cleared as well so Interrupt
+			 * Status Register will be cleared.
+			 */
+			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
+			curr_value &= ~__FW_INIT_HALT_P;
+			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
+		}
+
+		if (intr & __HFN_INT_ERR_PSS) {
+			/**
+			 * ERR_PSS bit needs to be cleared as well in case
+			 * interrups are shared so driver's interrupt handler is
+			 * still called eventhough it is already masked out.
+			 */
+			curr_value = bfa_reg_read(
+					bfa->ioc.ioc_regs.pss_err_status_reg);
+			curr_value &= __PSS_ERR_STATUS_SET;
+			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
+					curr_value);
+		}
+
+		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
 		bfa_msix_errint(bfa, intr);
+	}
 }
 
 void