path: root/drivers/scsi/qla2xxx/qla_init.c
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_init.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_init.c | 540
1 file changed, 202 insertions(+), 338 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1fa067e053d2..b9465643396b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -29,7 +29,6 @@ static int qla2x00_configure_loop(scsi_qla_host_t *);
 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
 static int qla2x00_configure_fabric(scsi_qla_host_t *);
 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
-static int qla2x00_device_resync(scsi_qla_host_t *);
 static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
     uint16_t *);
 
@@ -41,11 +40,10 @@ static int qla25xx_init_queues(struct qla_hw_data *);
 
 /* SRB Extensions ---------------------------------------------------------- */
 
-static void
-qla2x00_ctx_sp_timeout(unsigned long __data)
+void
+qla2x00_sp_timeout(unsigned long __data)
 {
 	srb_t *sp = (srb_t *)__data;
-	struct srb_ctx *ctx;
 	struct srb_iocb *iocb;
 	fc_port_t *fcport = sp->fcport;
 	struct qla_hw_data *ha = fcport->vha->hw;
@@ -55,79 +53,25 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	req = ha->req_q_map[0];
 	req->outstanding_cmds[sp->handle] = NULL;
-	ctx = sp->ctx;
-	iocb = ctx->u.iocb_cmd;
+	iocb = &sp->u.iocb_cmd;
 	iocb->timeout(sp);
-	iocb->free(sp);
+	sp->free(fcport->vha, sp);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-static void
-qla2x00_ctx_sp_free(srb_t *sp)
+void
+qla2x00_sp_free(void *data, void *ptr)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *iocb = ctx->u.iocb_cmd;
-	struct scsi_qla_host *vha = sp->fcport->vha;
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *iocb = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
 
 	del_timer(&iocb->timer);
-	kfree(iocb);
-	kfree(ctx);
-	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+	mempool_free(sp, vha->hw->srb_mempool);
 
 	QLA_VHA_MARK_NOT_BUSY(vha);
 }
 
-inline srb_t *
-qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
-    unsigned long tmo)
-{
-	srb_t *sp = NULL;
-	struct qla_hw_data *ha = vha->hw;
-	struct srb_ctx *ctx;
-	struct srb_iocb *iocb;
-	uint8_t bail;
-
-	QLA_VHA_MARK_BUSY(vha, bail);
-	if (bail)
-		return NULL;
-
-	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
-	if (!sp)
-		goto done;
-	ctx = kzalloc(size, GFP_KERNEL);
-	if (!ctx) {
-		mempool_free(sp, ha->srb_mempool);
-		sp = NULL;
-		goto done;
-	}
-	iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
-	if (!iocb) {
-		mempool_free(sp, ha->srb_mempool);
-		sp = NULL;
-		kfree(ctx);
-		goto done;
-	}
-
-	memset(sp, 0, sizeof(*sp));
-	sp->fcport = fcport;
-	sp->ctx = ctx;
-	ctx->iocbs = 1;
-	ctx->u.iocb_cmd = iocb;
-	iocb->free = qla2x00_ctx_sp_free;
-
-	init_timer(&iocb->timer);
-	if (!tmo)
-		goto done;
-	iocb->timer.expires = jiffies + tmo * HZ;
-	iocb->timer.data = (unsigned long)sp;
-	iocb->timer.function = qla2x00_ctx_sp_timeout;
-	add_timer(&iocb->timer);
-done:
-	if (!sp)
-		QLA_VHA_MARK_NOT_BUSY(vha);
-	return sp;
-}
-
 /* Asynchronous Login/Logout Routines -------------------------------------- */
 
 static inline unsigned long
@@ -149,19 +93,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
 }
 
 static void
-qla2x00_async_iocb_timeout(srb_t *sp)
+qla2x00_async_iocb_timeout(void *data)
 {
+	srb_t *sp = (srb_t *)data;
 	fc_port_t *fcport = sp->fcport;
-	struct srb_ctx *ctx = sp->ctx;
 
 	ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
 	    "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
-	    ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
+	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
 	    fcport->d_id.b.al_pa);
 
 	fcport->flags &= ~FCF_ASYNC_SENT;
-	if (ctx->type == SRB_LOGIN_CMD) {
-		struct srb_iocb *lio = ctx->u.iocb_cmd;
+	if (sp->type == SRB_LOGIN_CMD) {
+		struct srb_iocb *lio = &sp->u.iocb_cmd;
 		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
 		/* Retry as needed. */
 		lio->u.logio.data[0] = MBS_COMMAND_ERROR;
@@ -173,14 +117,16 @@ qla2x00_async_iocb_timeout(srb_t *sp)
 }
 
 static void
-qla2x00_async_login_ctx_done(srb_t *sp)
+qla2x00_async_login_sp_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
-
-	qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
-	    lio->u.logio.data);
-	lio->free(sp);
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
@@ -188,22 +134,21 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_LOGIN_CMD;
-	ctx->name = "login";
-	lio = ctx->u.iocb_cmd;
+	sp->type = SRB_LOGIN_CMD;
+	sp->name = "login";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
 	lio->timeout = qla2x00_async_iocb_timeout;
-	lio->done = qla2x00_async_login_ctx_done;
+	sp->done = qla2x00_async_login_sp_done;
 	lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -219,42 +164,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 	return rval;
 
 done_free_sp:
-	lio->free(sp);
+	sp->free(fcport->vha, sp);
 done:
 	return rval;
 }
 
 static void
-qla2x00_async_logout_ctx_done(srb_t *sp)
+qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
-
-	qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
-	    lio->u.logio.data);
-	lio->free(sp);
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_LOGOUT_CMD;
-	ctx->name = "logout";
-	lio = ctx->u.iocb_cmd;
+	sp->type = SRB_LOGOUT_CMD;
+	sp->name = "logout";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
 	lio->timeout = qla2x00_async_iocb_timeout;
-	lio->done = qla2x00_async_logout_ctx_done;
+	sp->done = qla2x00_async_logout_sp_done;
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
 		goto done_free_sp;
@@ -266,20 +212,22 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 	return rval;
 
 done_free_sp:
-	lio->free(sp);
+	sp->free(fcport->vha, sp);
 done:
 	return rval;
 }
 
 static void
-qla2x00_async_adisc_ctx_done(srb_t *sp)
+qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *lio = ctx->u.iocb_cmd;
-
-	qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
-	    lio->u.logio.data);
-	lio->free(sp);
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags))
+		qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+		    lio->u.logio.data);
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
@@ -287,22 +235,21 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *lio;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_ADISC_CMD;
-	ctx->name = "adisc";
-	lio = ctx->u.iocb_cmd;
+	sp->type = SRB_ADISC_CMD;
+	sp->name = "adisc";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	lio = &sp->u.iocb_cmd;
 	lio->timeout = qla2x00_async_iocb_timeout;
-	lio->done = qla2x00_async_adisc_ctx_done;
+	sp->done = qla2x00_async_adisc_sp_done;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 		lio->u.logio.flags |= SRB_LOGIN_RETRIED;
 	rval = qla2x00_start_sp(sp);
@@ -316,46 +263,62 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
 	return rval;
 
 done_free_sp:
-	lio->free(sp);
+	sp->free(fcport->vha, sp);
done:
 	return rval;
 }
 
 static void
-qla2x00_async_tm_cmd_ctx_done(srb_t *sp)
+qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
 {
-	struct srb_ctx *ctx = sp->ctx;
-	struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd;
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *iocb = &sp->u.iocb_cmd;
+	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+	uint32_t flags;
+	uint16_t lun;
+	int rval;
+
+	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+		flags = iocb->u.tmf.flags;
+		lun = (uint16_t)iocb->u.tmf.lun;
+
+		/* Issue Marker IOCB */
+		rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
+		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
 
-	qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb);
-	iocb->free(sp);
+		if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
+			ql_dbg(ql_dbg_taskm, vha, 0x8030,
+			    "TM IOCB failed (%x).\n", rval);
+		}
+	}
+	sp->free(sp->fcport->vha, sp);
 }
 
 int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
     uint32_t tag)
 {
 	struct scsi_qla_host *vha = fcport->vha;
 	srb_t *sp;
-	struct srb_ctx *ctx;
 	struct srb_iocb *tcf;
 	int rval;
 
 	rval = QLA_FUNCTION_FAILED;
-	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx),
-	    qla2x00_get_async_timeout(vha) + 2);
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
 		goto done;
 
-	ctx = sp->ctx;
-	ctx->type = SRB_TM_CMD;
-	ctx->name = "tmf";
-	tcf = ctx->u.iocb_cmd;
-	tcf->u.tmf.flags = flags;
+	sp->type = SRB_TM_CMD;
+	sp->name = "tmf";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+	tcf = &sp->u.iocb_cmd;
+	tcf->u.tmf.flags = tm_flags;
 	tcf->u.tmf.lun = lun;
 	tcf->u.tmf.data = tag;
 	tcf->timeout = qla2x00_async_iocb_timeout;
-	tcf->done = qla2x00_async_tm_cmd_ctx_done;
+	sp->done = qla2x00_async_tm_cmd_done;
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS)
@@ -368,7 +331,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 	return rval;
 
 done_free_sp:
-	tcf->free(sp);
+	sp->free(fcport->vha, sp);
 done:
 	return rval;
 }
@@ -387,6 +350,13 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 		 * requests.
 		 */
 		rval = qla2x00_get_port_database(vha, fcport, 0);
+		if (rval == QLA_NOT_LOGGED_IN) {
+			fcport->flags &= ~FCF_ASYNC_SENT;
+			fcport->flags |= FCF_LOGIN_NEEDED;
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+			break;
+		}
+
 		if (rval != QLA_SUCCESS) {
 			qla2x00_post_async_logout_work(vha, fcport, NULL);
 			qla2x00_post_async_login_work(vha, fcport, NULL);
@@ -452,30 +422,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 	return;
 }
 
-void
-qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
-    struct srb_iocb *iocb)
-{
-	int rval;
-	uint32_t flags;
-	uint16_t lun;
-
-	flags = iocb->u.tmf.flags;
-	lun = (uint16_t)iocb->u.tmf.lun;
-
-	/* Issue Marker IOCB */
-	rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
-	    vha->hw->rsp_q_map[0], fcport->loop_id, lun,
-	    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
-
-	if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
-		    "TM IOCB failed (%x).\n", rval);
-	}
-
-	return;
-}
-
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -969,6 +915,9 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
 {
 	uint16_t mb[4] = {0x1010, 0, 1, 0};
 
+	if (!IS_QLA81XX(vha->hw))
+		return QLA_SUCCESS;
+
 	return qla81xx_write_mpi_register(vha, mb);
 }
 
@@ -1262,7 +1211,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 		mem_size = (ha->fw_memory_size - 0x11000 + 1) *
 		    sizeof(uint16_t);
 	} else if (IS_FWI2_CAPABLE(ha)) {
-		if (IS_QLA81XX(ha))
+		if (IS_QLA83XX(ha))
+			fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+		else if (IS_QLA81XX(ha))
 			fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
 		else if (IS_QLA25XX(ha))
 			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
@@ -1270,10 +1221,20 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
 		mem_size = (ha->fw_memory_size - 0x100000 + 1) *
 		    sizeof(uint32_t);
-		if (ha->mqenable)
-			mq_size = sizeof(struct qla2xxx_mq_chain);
+		if (ha->mqenable) {
+			if (!IS_QLA83XX(ha))
+				mq_size = sizeof(struct qla2xxx_mq_chain);
+			/*
+			 * Allocate maximum buffer size for all queues.
+			 * Resizing must be done at end-of-dump processing.
+			 */
+			mq_size += ha->max_req_queues *
+			    (req->length * sizeof(request_t));
+			mq_size += ha->max_rsp_queues *
+			    (rsp->length * sizeof(response_t));
+		}
 		/* Allocate memory for Fibre Channel Event Buffer. */
-		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
+		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 			goto try_eft;
 
 		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
@@ -1484,17 +1445,8 @@ enable_82xx_npiv:
 		fw_major_version = ha->fw_major_version;
 		if (IS_QLA82XX(ha))
 			qla82xx_check_md_needed(vha);
-		else {
-			rval = qla2x00_get_fw_version(vha,
-			    &ha->fw_major_version,
-			    &ha->fw_minor_version,
-			    &ha->fw_subminor_version,
-			    &ha->fw_attributes,
-			    &ha->fw_memory_size,
-			    ha->mpi_version,
-			    &ha->mpi_capabilities,
-			    ha->phy_version);
-		}
+		else
+			rval = qla2x00_get_fw_version(vha);
 		if (rval != QLA_SUCCESS)
 			goto failed;
 		ha->flags.npiv_supported = 0;
@@ -1535,6 +1487,9 @@ enable_82xx_npiv:
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		}
 
+		if (IS_QLA83XX(ha))
+			goto skip_fac_check;
+
 		if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
 			uint32_t size;
 
@@ -1547,6 +1502,11 @@ enable_82xx_npiv:
 			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
 			    ha->fw_major_version, ha->fw_minor_version,
 			    ha->fw_subminor_version);
+skip_fac_check:
+			if (IS_QLA83XX(ha)) {
+				ha->flags.fac_supported = 0;
+				rval = QLA_SUCCESS;
+			}
 		}
 	}
 failed:
@@ -1725,7 +1685,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 	struct req_que *req = ha->req_q_map[0];
 	struct rsp_que *rsp = ha->rsp_q_map[0];
 
-/* Setup ring parameters in initialization control block. */
+	/* Setup ring parameters in initialization control block. */
 	icb = (struct init_cb_24xx *)ha->init_cb;
 	icb->request_q_outpointer = __constant_cpu_to_le16(0);
 	icb->response_q_inpointer = __constant_cpu_to_le16(0);
@@ -1736,7 +1696,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
 	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
-	if (ha->mqenable) {
+	if (ha->mqenable || IS_QLA83XX(ha)) {
 		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
 		icb->rid = __constant_cpu_to_le16(rid);
 		if (ha->flags.msix_enabled) {
@@ -1756,7 +1716,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 			    __constant_cpu_to_le32(BIT_18);
 
 		/* Use Disable MSIX Handshake mode for capable adapters */
-		if (IS_MSIX_NACK_CAPABLE(ha)) {
+		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
+		    (ha->flags.msix_enabled)) {
 			icb->firmware_options_2 &=
 			    __constant_cpu_to_le32(~BIT_22);
 			ha->flags.disable_msix_handshake = 1;
@@ -1800,7 +1761,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
 	struct rsp_que *rsp;
-	struct scsi_qla_host *vp;
 	struct mid_init_cb_24xx *mid_init_cb =
 	    (struct mid_init_cb_24xx *) ha->init_cb;
 
@@ -1831,11 +1791,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 	}
 
 	spin_lock(&ha->vport_slock);
-	/* Clear RSCN queue. */
-	list_for_each_entry(vp, &ha->vp_list, list) {
-		vp->rscn_in_ptr = 0;
-		vp->rscn_out_ptr = 0;
-	}
 
 	spin_unlock(&ha->vport_slock);
 
@@ -2028,7 +1983,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
 	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
 	if (rval != QLA_SUCCESS) {
 		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
-		    IS_QLA8XXX_TYPE(ha) ||
+		    IS_CNA_CAPABLE(ha) ||
 		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
 			ql_dbg(ql_dbg_disc, vha, 0x2008,
 			    "Loop is in a transition state.\n");
@@ -2120,7 +2075,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
 	uint16_t index;
 	struct qla_hw_data *ha = vha->hw;
 	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
-	    !IS_QLA8XXX_TYPE(ha);
+	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
 
 	if (memcmp(model, BINZERO, len) != 0) {
 		strncpy(ha->model_number, model, len);
@@ -2596,13 +2551,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 	if (ha->current_topology == ISP_CFG_FL &&
 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
-		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 
 	} else if (ha->current_topology == ISP_CFG_F &&
 	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
-		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 		clear_bit(LOCAL_LOOP_UPDATE, &flags);
 
@@ -2612,7 +2565,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 	} else if (!vha->flags.online ||
 	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
 
-		vha->flags.rscn_queue_overflow = 1;
 		set_bit(RSCN_UPDATE, &flags);
 		set_bit(LOCAL_LOOP_UPDATE, &flags);
 	}
@@ -2622,8 +2574,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 			ql_dbg(ql_dbg_disc, vha, 0x2015,
 			    "Loop resync needed, failing.\n");
 			rval = QLA_FUNCTION_FAILED;
-		}
-		else
+		} else
 			rval = qla2x00_configure_local_loop(vha);
 	}
 
@@ -2662,8 +2613,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 		if (test_bit(RSCN_UPDATE, &save_flags)) {
 			set_bit(RSCN_UPDATE, &vha->dpc_flags);
-			if (!IS_ALOGIO_CAPABLE(ha))
-				vha->flags.rscn_queue_overflow = 1;
 		}
 	}
 
@@ -2699,7 +2648,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 
 	found_devs = 0;
 	new_fcport = NULL;
-	entries = MAX_FIBRE_DEVICES;
+	entries = MAX_FIBRE_DEVICES_LOOP;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2016,
 	    "Getting FCAL position map.\n");
@@ -2707,7 +2656,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
 		qla2x00_get_fcal_position_map(vha, NULL);
 
 	/* Get list of logged in devices. */
-	memset(ha->gid_list, 0, GID_LIST_SIZE);
+	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
 	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 	    &entries);
 	if (rval != QLA_SUCCESS)
@@ -2971,7 +2920,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 static int
 qla2x00_configure_fabric(scsi_qla_host_t *vha)
 {
-	int rval, rval2;
+	int rval;
 	fc_port_t *fcport, *fcptemp;
 	uint16_t next_loopid;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
@@ -2995,12 +2944,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 	}
 	vha->device_flags |= SWITCH_FOUND;
 
-	/* Mark devices that need re-synchronization. */
-	rval2 = qla2x00_device_resync(vha);
-	if (rval2 == QLA_RSCNS_HANDLED) {
-		/* No point doing the scan, just continue. */
-		return (QLA_SUCCESS);
-	}
 	do {
 		/* FDMI support. */
 		if (ql2xfdmienable &&
@@ -3012,8 +2955,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			loop_id = NPH_SNS;
 		else
 			loop_id = SIMPLE_NAME_SERVER;
-		ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
-		    0xfc, mb, BIT_1 | BIT_0);
+		rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
+		    0xfc, mb, BIT_1|BIT_0);
+		if (rval != QLA_SUCCESS) {
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			return rval;
+		}
 		if (mb[0] != MBS_COMMAND_COMPLETE) {
 			ql_dbg(ql_dbg_disc, vha, 0x2042,
 			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
@@ -3044,6 +2991,13 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			}
 		}
 
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			fcport->scan_state = QLA_FCPORT_SCAN;
+		}
+
 		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
 		if (rval != QLA_SUCCESS)
 			break;
@@ -3059,7 +3013,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
 				continue;
 
-			if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+			if (fcport->scan_state == QLA_FCPORT_SCAN &&
+			    atomic_read(&fcport->state) == FCS_ONLINE) {
 				qla2x00_mark_device_lost(vha, fcport,
 				    ql2xplogiabsentdevice, 0);
 				if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3184,20 +3139,21 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 	rval = QLA_SUCCESS;
 
 	/* Try GID_PT to get device list, else GAN. */
-	swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
+	if (!ha->swl)
+		ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
+		    GFP_KERNEL);
+	swl = ha->swl;
 	if (!swl) {
 		/*EMPTY*/
 		ql_dbg(ql_dbg_disc, vha, 0x2054,
 		    "GID_PT allocations failed, fallback on GA_NXT.\n");
 	} else {
+		memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
 		if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
-			kfree(swl);
 			swl = NULL;
 		} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
-			kfree(swl);
 			swl = NULL;
 		} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
-			kfree(swl);
 			swl = NULL;
 		} else if (ql2xiidmaenable &&
 		    qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
@@ -3215,7 +3171,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 	if (new_fcport == NULL) {
 		ql_log(ql_log_warn, vha, 0x205e,
 		    "Failed to allocate memory for fcport.\n");
-		kfree(swl);
 		return (QLA_MEMORY_ALLOC_FAILED);
 	}
 	new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
@@ -3332,6 +3287,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 			    WWN_SIZE))
 				continue;
 
+			fcport->scan_state = QLA_FCPORT_FOUND;
+
 			found++;
 
 			/* Update port state. */
@@ -3368,6 +3325,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 		fcport->flags |= FCF_LOGIN_NEEDED;
 		if (fcport->loop_id != FC_NO_LOOP_ID &&
 		    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+		    (fcport->flags & FCF_ASYNC_SENT) == 0 &&
 		    fcport->port_type != FCT_INITIATOR &&
 		    fcport->port_type != FCT_BROADCAST) {
 			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3390,14 +3348,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
 		if (new_fcport == NULL) {
 			ql_log(ql_log_warn, vha, 0x2066,
 			    "Memory allocation failed for fcport.\n");
-			kfree(swl);
 			return (QLA_MEMORY_ALLOC_FAILED);
 		}
 		new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
 		new_fcport->d_id.b24 = nxt_d_id.b24;
 	}
 
-	kfree(swl);
 	kfree(new_fcport);
 
 	return (rval);
@@ -3470,6 +3426,9 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 
 		/* If not in use then it is free to use. */
 		if (!found) {
+			ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
+			    "Assigning new loopid=%x, portid=%x.\n",
+			    dev->loop_id, dev->d_id.b24);
 			break;
 		}
 
@@ -3488,110 +3447,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
 }
 
 /*
- * qla2x00_device_resync
- *	Marks devices in the database that needs resynchronization.
- *
- * Input:
- *	ha = adapter block pointer.
- *
- * Context:
- *	Kernel context.
- */
-static int
-qla2x00_device_resync(scsi_qla_host_t *vha)
-{
-	int rval;
-	uint32_t mask;
-	fc_port_t *fcport;
-	uint32_t rscn_entry;
-	uint8_t rscn_out_iter;
-	uint8_t format;
-	port_id_t d_id = {};
-
-	rval = QLA_RSCNS_HANDLED;
-
-	while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
-	    vha->flags.rscn_queue_overflow) {
-
-		rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
-		format = MSB(MSW(rscn_entry));
-		d_id.b.domain = LSB(MSW(rscn_entry));
-		d_id.b.area = MSB(LSW(rscn_entry));
-		d_id.b.al_pa = LSB(LSW(rscn_entry));
-
-		ql_dbg(ql_dbg_disc, vha, 0x2020,
-		    "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
-		    vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
-		    d_id.b.al_pa);
-
-		vha->rscn_out_ptr++;
-		if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
-			vha->rscn_out_ptr = 0;
-
-		/* Skip duplicate entries. */
-		for (rscn_out_iter = vha->rscn_out_ptr;
-		    !vha->flags.rscn_queue_overflow &&
-		    rscn_out_iter != vha->rscn_in_ptr;
-		    rscn_out_iter = (rscn_out_iter ==
-		    (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
-
-			if (rscn_entry != vha->rscn_queue[rscn_out_iter])
-				break;
-
-			ql_dbg(ql_dbg_disc, vha, 0x2021,
-			    "Skipping duplicate RSCN queue entry found at "
-			    "[%d].\n", rscn_out_iter);
-
-			vha->rscn_out_ptr = rscn_out_iter;
-		}
-
-		/* Queue overflow, set switch default case. */
-		if (vha->flags.rscn_queue_overflow) {
-			ql_dbg(ql_dbg_disc, vha, 0x2022,
-			    "device_resync: rscn overflow.\n");
-
-			format = 3;
-			vha->flags.rscn_queue_overflow = 0;
-		}
-
-		switch (format) {
-		case 0:
-			mask = 0xffffff;
-			break;
-		case 1:
-			mask = 0xffff00;
-			break;
-		case 2:
-			mask = 0xff0000;
-			break;
-		default:
-			mask = 0x0;
-			d_id.b24 = 0;
-			vha->rscn_out_ptr = vha->rscn_in_ptr;
-			break;
-		}
-
-		rval = QLA_SUCCESS;
-
-		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
-			    (fcport->d_id.b24 & mask) != d_id.b24 ||
-			    fcport->port_type == FCT_BROADCAST)
-				continue;
-
-			if (atomic_read(&fcport->state) == FCS_ONLINE) {
-				if (format != 3 ||
-				    fcport->port_type != FCT_INITIATOR) {
-					qla2x00_mark_device_lost(vha, fcport,
-					    0, 0);
-				}
-			}
-		}
-	}
-	return (rval);
-}
-
-/*
  * qla2x00_fabric_dev_login
  *	Login fabric target device and update FC port database.
 *
@@ -3644,6 +3499,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
 		} else {
 			qla2x00_update_fcport(vha, fcport);
 		}
+	} else {
+		/* Retry Login. */
+		qla2x00_mark_device_lost(vha, fcport, 1, 0);
 	}
 
 	return (rval);
@@ -3684,9 +3542,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
 
 		/* Login fcport on switch. */
-		ha->isp_ops->fabric_login(vha, fcport->loop_id,
+		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
 		    fcport->d_id.b.domain, fcport->d_id.b.area,
 		    fcport->d_id.b.al_pa, mb, BIT_0);
+		if (rval != QLA_SUCCESS) {
+			return rval;
+		}
 		if (mb[0] == MBS_PORT_ID_USED) {
 			/*
 			 * Device has another loop ID.  The firmware team
@@ -4100,15 +3961,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
 			ha->isp_abort_cnt = 0;
 			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
 
-			if (IS_QLA81XX(ha))
-				qla2x00_get_fw_version(vha,
-				    &ha->fw_major_version,
-				    &ha->fw_minor_version,
-				    &ha->fw_subminor_version,
-				    &ha->fw_attributes, &ha->fw_memory_size,
-				    ha->mpi_version, &ha->mpi_capabilities,
-				    ha->phy_version);
-
+			if (IS_QLA81XX(ha) || IS_QLA8031(ha))
+				qla2x00_get_fw_version(vha);
 			if (ha->fce) {
 				ha->flags.fce_enabled = 1;
 				memset(ha->fce, 0,
@@ -4974,7 +4828,6 @@ try_blob_fw:
 
 	ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
 	ha->flags.running_gold_fw = 1;
-
 	return rval;
 }
 
@@ -5009,6 +4862,7 @@ int
 qla24xx_configure_vhba(scsi_qla_host_t *vha)
 {
 	int rval = QLA_SUCCESS;
+	int rval2;
 	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
@@ -5033,12 +4887,18 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
 	vha->flags.management_server_logged_in = 0;
 
 	/* Login to SNS first */
-	ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
-	if (mb[0] != MBS_COMMAND_COMPLETE) {
-		ql_dbg(ql_dbg_init, vha, 0x0103,
-		    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
-		    "mb[6]=%x mb[7]=%x.\n",
-		    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
+	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
+	    BIT_1);
+	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
+		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
+			ql_dbg(ql_dbg_init, vha, 0x0120,
+			    "Failed SNS login: loop_id=%x, rval2=%d\n",
+			    NPH_SNS, rval2);
+		else
+			ql_dbg(ql_dbg_init, vha, 0x0103,
+			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
 		return (QLA_FUNCTION_FAILED);
 	}
 
@@ -5214,10 +5074,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 		nv->reset_delay = 5;
 		nv->max_luns_per_target = __constant_cpu_to_le16(128);
 		nv->port_down_retry_count = __constant_cpu_to_le16(30);
-		nv->link_down_timeout = __constant_cpu_to_le16(30);
+		nv->link_down_timeout = __constant_cpu_to_le16(180);
 		nv->enode_mac[0] = 0x00;
-		nv->enode_mac[1] = 0x02;
-		nv->enode_mac[2] = 0x03;
+		nv->enode_mac[1] = 0xC0;
+		nv->enode_mac[2] = 0xDD;
 		nv->enode_mac[3] = 0x04;
 		nv->enode_mac[4] = 0x05;
 		nv->enode_mac[5] = 0x06 + ha->port_no;
@@ -5248,9 +5108,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
 	/* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
 	if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
-		icb->enode_mac[0] = 0x01;
-		icb->enode_mac[1] = 0x02;
-		icb->enode_mac[2] = 0x03;
+		icb->enode_mac[0] = 0x00;
+		icb->enode_mac[1] = 0xC0;
+		icb->enode_mac[2] = 0xDD;
 		icb->enode_mac[3] = 0x04;
 		icb->enode_mac[4] = 0x05;
 		icb->enode_mac[5] = 0x06 + ha->port_no;
@@ -5353,6 +5213,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
 	if (ql2xloginretrycount)
 		ha->login_retry_count = ql2xloginretrycount;
 
+	/* if not running MSI-X we need handshaking on interrupts */
+	if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
+		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+
 	/* Enable ZIO. */
 	if (!vha->flags.init_done) {
 		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &