aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorGiridhar Malavali <giridhar.malavali@qlogic.com>2012-02-09 14:15:36 -0500
committerJames Bottomley <JBottomley@Parallels.com>2012-02-19 09:14:08 -0500
commit9ba56b95a588906a65664a9299a9f8ac1a0f6a91 (patch)
tree93786c52320c2a7276c99cc4d9b3672ca6e0a50d /drivers/scsi
parent69e5f1ea61a3e84c03103c6a18ee9cacef4cbb9e (diff)
[SCSI] qla2xxx: Consolidation of SRB processing.
Rework the structures related to SRB processing to minimize the memory allocations per I/O and manage resources associated with and completions from common routines. Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com> Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com> Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h44
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c256
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h45
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c158
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c299
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c206
11 files changed, 485 insertions, 628 deletions
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 6887538d1736..b2b664483ab4 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,29 +11,36 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12 12
13/* BSG support for ELS/CT pass through */ 13/* BSG support for ELS/CT pass through */
14inline srb_t * 14void
15qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size) 15qla2x00_bsg_job_done(void *data, void *ptr, int res)
16{ 16{
17 srb_t *sp; 17 srb_t *sp = (srb_t *)ptr;
18 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
20
21 bsg_job->reply->result = res;
22 bsg_job->job_done(bsg_job);
23 sp->free(vha, sp);
24}
25
26void
27qla2x00_bsg_sp_free(void *data, void *ptr)
28{
29 srb_t *sp = (srb_t *)ptr;
30 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
31 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
18 struct qla_hw_data *ha = vha->hw; 32 struct qla_hw_data *ha = vha->hw;
19 struct srb_ctx *ctx;
20 33
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); 34 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
22 if (!sp) 35 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
29 }
30 36
31 memset(sp, 0, sizeof(*sp)); 37 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
32 sp->fcport = fcport; 38 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
33 sp->ctx = ctx; 39
34 ctx->iocbs = 1; 40 if (sp->type == SRB_CT_CMD ||
35done: 41 sp->type == SRB_ELS_CMD_HST)
36 return sp; 42 kfree(sp->fcport);
43 mempool_free(sp, vha->hw->srb_mempool);
37} 44}
38 45
39int 46int
@@ -217,6 +224,7 @@ exit_fcp_prio_cfg:
217 bsg_job->job_done(bsg_job); 224 bsg_job->job_done(bsg_job);
218 return ret; 225 return ret;
219} 226}
227
220static int 228static int
221qla2x00_process_els(struct fc_bsg_job *bsg_job) 229qla2x00_process_els(struct fc_bsg_job *bsg_job)
222{ 230{
@@ -230,7 +238,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
230 int req_sg_cnt, rsp_sg_cnt; 238 int req_sg_cnt, rsp_sg_cnt;
231 int rval = (DRIVER_ERROR << 16); 239 int rval = (DRIVER_ERROR << 16);
232 uint16_t nextlid = 0; 240 uint16_t nextlid = 0;
233 struct srb_ctx *els;
234 241
235 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 242 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
236 rport = bsg_job->rport; 243 rport = bsg_job->rport;
@@ -337,20 +344,21 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
337 } 344 }
338 345
339 /* Alloc SRB structure */ 346 /* Alloc SRB structure */
340 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 347 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
341 if (!sp) { 348 if (!sp) {
342 rval = -ENOMEM; 349 rval = -ENOMEM;
343 goto done_unmap_sg; 350 goto done_unmap_sg;
344 } 351 }
345 352
346 els = sp->ctx; 353 sp->type =
347 els->type =
348 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 354 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
349 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); 355 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
350 els->name = 356 sp->name =
351 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 357 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
352 "bsg_els_rpt" : "bsg_els_hst"); 358 "bsg_els_rpt" : "bsg_els_hst");
353 els->u.bsg_job = bsg_job; 359 sp->u.bsg_job = bsg_job;
360 sp->free = qla2x00_bsg_sp_free;
361 sp->done = qla2x00_bsg_job_done;
354 362
355 ql_dbg(ql_dbg_user, vha, 0x700a, 363 ql_dbg(ql_dbg_user, vha, 0x700a,
356 "bsg rqst type: %s els type: %x - loop-id=%x " 364 "bsg rqst type: %s els type: %x - loop-id=%x "
@@ -362,7 +370,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
362 if (rval != QLA_SUCCESS) { 370 if (rval != QLA_SUCCESS) {
363 ql_log(ql_log_warn, vha, 0x700e, 371 ql_log(ql_log_warn, vha, 0x700e,
364 "qla2x00_start_sp failed = %d\n", rval); 372 "qla2x00_start_sp failed = %d\n", rval);
365 kfree(sp->ctx);
366 mempool_free(sp, ha->srb_mempool); 373 mempool_free(sp, ha->srb_mempool);
367 rval = -EIO; 374 rval = -EIO;
368 goto done_unmap_sg; 375 goto done_unmap_sg;
@@ -409,7 +416,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
409 uint16_t loop_id; 416 uint16_t loop_id;
410 struct fc_port *fcport; 417 struct fc_port *fcport;
411 char *type = "FC_BSG_HST_CT"; 418 char *type = "FC_BSG_HST_CT";
412 struct srb_ctx *ct;
413 419
414 req_sg_cnt = 420 req_sg_cnt =
415 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 421 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
@@ -486,19 +492,20 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
486 fcport->loop_id = loop_id; 492 fcport->loop_id = loop_id;
487 493
488 /* Alloc SRB structure */ 494 /* Alloc SRB structure */
489 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 495 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
490 if (!sp) { 496 if (!sp) {
491 ql_log(ql_log_warn, vha, 0x7015, 497 ql_log(ql_log_warn, vha, 0x7015,
492 "qla2x00_get_ctx_bsg_sp failed.\n"); 498 "qla2x00_get_sp failed.\n");
493 rval = -ENOMEM; 499 rval = -ENOMEM;
494 goto done_free_fcport; 500 goto done_free_fcport;
495 } 501 }
496 502
497 ct = sp->ctx; 503 sp->type = SRB_CT_CMD;
498 ct->type = SRB_CT_CMD; 504 sp->name = "bsg_ct";
499 ct->name = "bsg_ct"; 505 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
500 ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 506 sp->u.bsg_job = bsg_job;
501 ct->u.bsg_job = bsg_job; 507 sp->free = qla2x00_bsg_sp_free;
508 sp->done = qla2x00_bsg_job_done;
502 509
503 ql_dbg(ql_dbg_user, vha, 0x7016, 510 ql_dbg(ql_dbg_user, vha, 0x7016,
504 "bsg rqst type: %s else type: %x - " 511 "bsg rqst type: %s else type: %x - "
@@ -511,7 +518,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
511 if (rval != QLA_SUCCESS) { 518 if (rval != QLA_SUCCESS) {
512 ql_log(ql_log_warn, vha, 0x7017, 519 ql_log(ql_log_warn, vha, 0x7017,
513 "qla2x00_start_sp failed=%d.\n", rval); 520 "qla2x00_start_sp failed=%d.\n", rval);
514 kfree(sp->ctx);
515 mempool_free(sp, ha->srb_mempool); 521 mempool_free(sp, ha->srb_mempool);
516 rval = -EIO; 522 rval = -EIO;
517 goto done_free_fcport; 523 goto done_free_fcport;
@@ -1669,7 +1675,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1669 int cnt, que; 1675 int cnt, que;
1670 unsigned long flags; 1676 unsigned long flags;
1671 struct req_que *req; 1677 struct req_que *req;
1672 struct srb_ctx *sp_bsg;
1673 1678
1674 /* find the bsg job from the active list of commands */ 1679 /* find the bsg job from the active list of commands */
1675 spin_lock_irqsave(&ha->hardware_lock, flags); 1680 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1681,11 +1686,9 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1681 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1686 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1682 sp = req->outstanding_cmds[cnt]; 1687 sp = req->outstanding_cmds[cnt];
1683 if (sp) { 1688 if (sp) {
1684 sp_bsg = sp->ctx; 1689 if (((sp->type == SRB_CT_CMD) ||
1685 1690 (sp->type == SRB_ELS_CMD_HST))
1686 if (((sp_bsg->type == SRB_CT_CMD) || 1691 && (sp->u.bsg_job == bsg_job)) {
1687 (sp_bsg->type == SRB_ELS_CMD_HST))
1688 && (sp_bsg->u.bsg_job == bsg_job)) {
1689 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1692 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1690 if (ha->isp_ops->abort_command(sp)) { 1693 if (ha->isp_ops->abort_command(sp)) {
1691 ql_log(ql_log_warn, vha, 0x7089, 1694 ql_log(ql_log_warn, vha, 0x7089,
@@ -1715,7 +1718,6 @@ done:
1715 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1718 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1716 if (bsg_job->request->msgcode == FC_BSG_HST_CT) 1719 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1717 kfree(sp->fcport); 1720 kfree(sp->fcport);
1718 kfree(sp->ctx);
1719 mempool_free(sp, ha->srb_mempool); 1721 mempool_free(sp, ha->srb_mempool);
1720 return 0; 1722 return 0;
1721} 1723}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index e4c6b9409933..cd278a09bea0 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -14,10 +14,11 @@
14 * | Module Init and Probe | 0x011f | 0x4b,0xfa | 14 * | Module Init and Probe | 0x011f | 0x4b,0xfa |
15 * | Mailbox commands | 0x1139 | 0x112c-0x112e | 15 * | Mailbox commands | 0x1139 | 0x112c-0x112e |
16 * | Device Discovery | 0x2084 | | 16 * | Device Discovery | 0x2084 | |
17 * | Queue Command and IO tracing | 0x302f | 0x3008 | 17 * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 |
18 * | | | 0x302d-0x302e | 18 * | | | 0x302d-0x302e |
19 * | DPC Thread | 0x401c | | 19 * | DPC Thread | 0x401c | |
20 * | Async Events | 0x5057 | 0x5052 | 20 * | Async Events | 0x5057 | 0x502b-0x502f |
21 * | | | 0x5047,0x5052 |
21 * | Timer Routines | 0x6011 | 0x600e-0x600f | 22 * | Timer Routines | 0x6011 | 0x600e-0x600f |
22 * | User Space Interactions | 0x709e | 0x7018,0x702e | 23 * | User Space Interactions | 0x709e | 0x7018,0x702e |
23 * | | | 0x7039,0x7045 | 24 * | | | 0x7039,0x7045 |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 7b7d829bef8b..6704ef84c450 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -202,20 +202,12 @@ struct sd_dif_tuple {
202/* 202/*
203 * SCSI Request Block 203 * SCSI Request Block
204 */ 204 */
205typedef struct srb { 205struct srb_cmd {
206 atomic_t ref_count;
207 struct fc_port *fcport;
208 uint32_t handle;
209
210 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 206 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
211
212 uint16_t flags;
213
214 uint32_t request_sense_length; 207 uint32_t request_sense_length;
215 uint8_t *request_sense_ptr; 208 uint8_t *request_sense_ptr;
216
217 void *ctx; 209 void *ctx;
218} srb_t; 210};
219 211
220/* 212/*
221 * SRB flag definitions 213 * SRB flag definitions
@@ -254,10 +246,7 @@ struct srb_iocb {
254 } u; 246 } u;
255 247
256 struct timer_list timer; 248 struct timer_list timer;
257 249 void (*timeout)(void *);
258 void (*done)(srb_t *);
259 void (*free)(srb_t *);
260 void (*timeout)(srb_t *);
261}; 250};
262 251
263/* Values for srb_ctx type */ 252/* Values for srb_ctx type */
@@ -268,16 +257,37 @@ struct srb_iocb {
268#define SRB_CT_CMD 5 257#define SRB_CT_CMD 5
269#define SRB_ADISC_CMD 6 258#define SRB_ADISC_CMD 6
270#define SRB_TM_CMD 7 259#define SRB_TM_CMD 7
260#define SRB_SCSI_CMD 8
271 261
272struct srb_ctx { 262typedef struct srb {
263 atomic_t ref_count;
264 struct fc_port *fcport;
265 uint32_t handle;
266 uint16_t flags;
273 uint16_t type; 267 uint16_t type;
274 char *name; 268 char *name;
275 int iocbs; 269 int iocbs;
276 union { 270 union {
277 struct srb_iocb *iocb_cmd; 271 struct srb_iocb iocb_cmd;
278 struct fc_bsg_job *bsg_job; 272 struct fc_bsg_job *bsg_job;
273 struct srb_cmd scmd;
279 } u; 274 } u;
280}; 275 void (*done)(void *, void *, int);
276 void (*free)(void *, void *);
277} srb_t;
278
279#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
280#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
281#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
282
283#define GET_CMD_SENSE_LEN(sp) \
284 (sp->u.scmd.request_sense_length)
285#define SET_CMD_SENSE_LEN(sp, len) \
286 (sp->u.scmd.request_sense_length = len)
287#define GET_CMD_SENSE_PTR(sp) \
288 (sp->u.scmd.request_sense_ptr)
289#define SET_CMD_SENSE_PTR(sp, ptr) \
290 (sp->u.scmd.request_sense_ptr = ptr)
281 291
282struct msg_echo_lb { 292struct msg_echo_lb {
283 dma_addr_t send_dma; 293 dma_addr_t send_dma;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 362d49cbcb72..87fee23fd167 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -71,8 +71,6 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
71 uint16_t *); 71 uint16_t *);
72extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, 72extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
73 uint16_t *); 73 uint16_t *);
74extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
75 struct srb_iocb *);
76extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); 74extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
77extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); 75extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
78 76
@@ -156,8 +154,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
156extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); 154extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
157extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); 155extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
158 156
159extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *); 157extern void qla2x00_sp_free_dma(void *, void *);
160
161extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); 158extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
162 159
163extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); 160extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -542,6 +539,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
542 539
543/* IOCB related functions */ 540/* IOCB related functions */
544extern int qla82xx_start_scsi(srb_t *); 541extern int qla82xx_start_scsi(srb_t *);
542extern void qla2x00_sp_free(void *, void *);
543extern void qla2x00_sp_timeout(unsigned long);
544extern void qla2x00_bsg_job_done(void *, void *, int);
545extern void qla2x00_bsg_sp_free(void *, void *);
545 546
546/* Interrupt related */ 547/* Interrupt related */
547extern irqreturn_t qla82xx_intr_handler(int, void *); 548extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e5b417aa9fca..4837b5872cde 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -41,11 +41,10 @@ static int qla25xx_init_queues(struct qla_hw_data *);
41 41
42/* SRB Extensions ---------------------------------------------------------- */ 42/* SRB Extensions ---------------------------------------------------------- */
43 43
44static void 44void
45qla2x00_ctx_sp_timeout(unsigned long __data) 45qla2x00_sp_timeout(unsigned long __data)
46{ 46{
47 srb_t *sp = (srb_t *)__data; 47 srb_t *sp = (srb_t *)__data;
48 struct srb_ctx *ctx;
49 struct srb_iocb *iocb; 48 struct srb_iocb *iocb;
50 fc_port_t *fcport = sp->fcport; 49 fc_port_t *fcport = sp->fcport;
51 struct qla_hw_data *ha = fcport->vha->hw; 50 struct qla_hw_data *ha = fcport->vha->hw;
@@ -55,79 +54,25 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
55 spin_lock_irqsave(&ha->hardware_lock, flags); 54 spin_lock_irqsave(&ha->hardware_lock, flags);
56 req = ha->req_q_map[0]; 55 req = ha->req_q_map[0];
57 req->outstanding_cmds[sp->handle] = NULL; 56 req->outstanding_cmds[sp->handle] = NULL;
58 ctx = sp->ctx; 57 iocb = &sp->u.iocb_cmd;
59 iocb = ctx->u.iocb_cmd;
60 iocb->timeout(sp); 58 iocb->timeout(sp);
61 iocb->free(sp); 59 sp->free(fcport->vha, sp);
62 spin_unlock_irqrestore(&ha->hardware_lock, flags); 60 spin_unlock_irqrestore(&ha->hardware_lock, flags);
63} 61}
64 62
65static void 63void
66qla2x00_ctx_sp_free(srb_t *sp) 64qla2x00_sp_free(void *data, void *ptr)
67{ 65{
68 struct srb_ctx *ctx = sp->ctx; 66 srb_t *sp = (srb_t *)ptr;
69 struct srb_iocb *iocb = ctx->u.iocb_cmd; 67 struct srb_iocb *iocb = &sp->u.iocb_cmd;
70 struct scsi_qla_host *vha = sp->fcport->vha; 68 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
71 69
72 del_timer(&iocb->timer); 70 del_timer(&iocb->timer);
73 kfree(iocb); 71 mempool_free(sp, vha->hw->srb_mempool);
74 kfree(ctx);
75 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
76 72
77 QLA_VHA_MARK_NOT_BUSY(vha); 73 QLA_VHA_MARK_NOT_BUSY(vha);
78} 74}
79 75
80inline srb_t *
81qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
82 unsigned long tmo)
83{
84 srb_t *sp = NULL;
85 struct qla_hw_data *ha = vha->hw;
86 struct srb_ctx *ctx;
87 struct srb_iocb *iocb;
88 uint8_t bail;
89
90 QLA_VHA_MARK_BUSY(vha, bail);
91 if (bail)
92 return NULL;
93
94 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
95 if (!sp)
96 goto done;
97 ctx = kzalloc(size, GFP_KERNEL);
98 if (!ctx) {
99 mempool_free(sp, ha->srb_mempool);
100 sp = NULL;
101 goto done;
102 }
103 iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
104 if (!iocb) {
105 mempool_free(sp, ha->srb_mempool);
106 sp = NULL;
107 kfree(ctx);
108 goto done;
109 }
110
111 memset(sp, 0, sizeof(*sp));
112 sp->fcport = fcport;
113 sp->ctx = ctx;
114 ctx->iocbs = 1;
115 ctx->u.iocb_cmd = iocb;
116 iocb->free = qla2x00_ctx_sp_free;
117
118 init_timer(&iocb->timer);
119 if (!tmo)
120 goto done;
121 iocb->timer.expires = jiffies + tmo * HZ;
122 iocb->timer.data = (unsigned long)sp;
123 iocb->timer.function = qla2x00_ctx_sp_timeout;
124 add_timer(&iocb->timer);
125done:
126 if (!sp)
127 QLA_VHA_MARK_NOT_BUSY(vha);
128 return sp;
129}
130
131/* Asynchronous Login/Logout Routines -------------------------------------- */ 76/* Asynchronous Login/Logout Routines -------------------------------------- */
132 77
133static inline unsigned long 78static inline unsigned long
@@ -149,19 +94,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
149} 94}
150 95
151static void 96static void
152qla2x00_async_iocb_timeout(srb_t *sp) 97qla2x00_async_iocb_timeout(void *data)
153{ 98{
99 srb_t *sp = (srb_t *)data;
154 fc_port_t *fcport = sp->fcport; 100 fc_port_t *fcport = sp->fcport;
155 struct srb_ctx *ctx = sp->ctx;
156 101
157 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, 102 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
158 "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", 103 "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
159 ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 104 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
160 fcport->d_id.b.al_pa); 105 fcport->d_id.b.al_pa);
161 106
162 fcport->flags &= ~FCF_ASYNC_SENT; 107 fcport->flags &= ~FCF_ASYNC_SENT;
163 if (ctx->type == SRB_LOGIN_CMD) { 108 if (sp->type == SRB_LOGIN_CMD) {
164 struct srb_iocb *lio = ctx->u.iocb_cmd; 109 struct srb_iocb *lio = &sp->u.iocb_cmd;
165 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); 110 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
166 /* Retry as needed. */ 111 /* Retry as needed. */
167 lio->u.logio.data[0] = MBS_COMMAND_ERROR; 112 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
@@ -173,14 +118,16 @@ qla2x00_async_iocb_timeout(srb_t *sp)
173} 118}
174 119
175static void 120static void
176qla2x00_async_login_ctx_done(srb_t *sp) 121qla2x00_async_login_sp_done(void *data, void *ptr, int res)
177{ 122{
178 struct srb_ctx *ctx = sp->ctx; 123 srb_t *sp = (srb_t *)ptr;
179 struct srb_iocb *lio = ctx->u.iocb_cmd; 124 struct srb_iocb *lio = &sp->u.iocb_cmd;
180 125 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
181 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, 126
182 lio->u.logio.data); 127 if (!test_bit(UNLOADING, &vha->dpc_flags))
183 lio->free(sp); 128 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
129 lio->u.logio.data);
130 sp->free(sp->fcport->vha, sp);
184} 131}
185 132
186int 133int
@@ -188,22 +135,21 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
188 uint16_t *data) 135 uint16_t *data)
189{ 136{
190 srb_t *sp; 137 srb_t *sp;
191 struct srb_ctx *ctx;
192 struct srb_iocb *lio; 138 struct srb_iocb *lio;
193 int rval; 139 int rval;
194 140
195 rval = QLA_FUNCTION_FAILED; 141 rval = QLA_FUNCTION_FAILED;
196 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 142 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
197 qla2x00_get_async_timeout(vha) + 2);
198 if (!sp) 143 if (!sp)
199 goto done; 144 goto done;
200 145
201 ctx = sp->ctx; 146 sp->type = SRB_LOGIN_CMD;
202 ctx->type = SRB_LOGIN_CMD; 147 sp->name = "login";
203 ctx->name = "login"; 148 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
204 lio = ctx->u.iocb_cmd; 149
150 lio = &sp->u.iocb_cmd;
205 lio->timeout = qla2x00_async_iocb_timeout; 151 lio->timeout = qla2x00_async_iocb_timeout;
206 lio->done = qla2x00_async_login_ctx_done; 152 sp->done = qla2x00_async_login_sp_done;
207 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; 153 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
208 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 154 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
209 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 155 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -219,42 +165,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
219 return rval; 165 return rval;
220 166
221done_free_sp: 167done_free_sp:
222 lio->free(sp); 168 sp->free(fcport->vha, sp);
223done: 169done:
224 return rval; 170 return rval;
225} 171}
226 172
227static void 173static void
228qla2x00_async_logout_ctx_done(srb_t *sp) 174qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
229{ 175{
230 struct srb_ctx *ctx = sp->ctx; 176 srb_t *sp = (srb_t *)ptr;
231 struct srb_iocb *lio = ctx->u.iocb_cmd; 177 struct srb_iocb *lio = &sp->u.iocb_cmd;
232 178 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
233 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, 179
234 lio->u.logio.data); 180 if (!test_bit(UNLOADING, &vha->dpc_flags))
235 lio->free(sp); 181 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
182 lio->u.logio.data);
183 sp->free(sp->fcport->vha, sp);
236} 184}
237 185
238int 186int
239qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 187qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
240{ 188{
241 srb_t *sp; 189 srb_t *sp;
242 struct srb_ctx *ctx;
243 struct srb_iocb *lio; 190 struct srb_iocb *lio;
244 int rval; 191 int rval;
245 192
246 rval = QLA_FUNCTION_FAILED; 193 rval = QLA_FUNCTION_FAILED;
247 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 194 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
248 qla2x00_get_async_timeout(vha) + 2);
249 if (!sp) 195 if (!sp)
250 goto done; 196 goto done;
251 197
252 ctx = sp->ctx; 198 sp->type = SRB_LOGOUT_CMD;
253 ctx->type = SRB_LOGOUT_CMD; 199 sp->name = "logout";
254 ctx->name = "logout"; 200 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
255 lio = ctx->u.iocb_cmd; 201
202 lio = &sp->u.iocb_cmd;
256 lio->timeout = qla2x00_async_iocb_timeout; 203 lio->timeout = qla2x00_async_iocb_timeout;
257 lio->done = qla2x00_async_logout_ctx_done; 204 sp->done = qla2x00_async_logout_sp_done;
258 rval = qla2x00_start_sp(sp); 205 rval = qla2x00_start_sp(sp);
259 if (rval != QLA_SUCCESS) 206 if (rval != QLA_SUCCESS)
260 goto done_free_sp; 207 goto done_free_sp;
@@ -266,20 +213,22 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
266 return rval; 213 return rval;
267 214
268done_free_sp: 215done_free_sp:
269 lio->free(sp); 216 sp->free(fcport->vha, sp);
270done: 217done:
271 return rval; 218 return rval;
272} 219}
273 220
274static void 221static void
275qla2x00_async_adisc_ctx_done(srb_t *sp) 222qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
276{ 223{
277 struct srb_ctx *ctx = sp->ctx; 224 srb_t *sp = (srb_t *)ptr;
278 struct srb_iocb *lio = ctx->u.iocb_cmd; 225 struct srb_iocb *lio = &sp->u.iocb_cmd;
279 226 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
280 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, 227
281 lio->u.logio.data); 228 if (!test_bit(UNLOADING, &vha->dpc_flags))
282 lio->free(sp); 229 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
230 lio->u.logio.data);
231 sp->free(sp->fcport->vha, sp);
283} 232}
284 233
285int 234int
@@ -287,22 +236,21 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
287 uint16_t *data) 236 uint16_t *data)
288{ 237{
289 srb_t *sp; 238 srb_t *sp;
290 struct srb_ctx *ctx;
291 struct srb_iocb *lio; 239 struct srb_iocb *lio;
292 int rval; 240 int rval;
293 241
294 rval = QLA_FUNCTION_FAILED; 242 rval = QLA_FUNCTION_FAILED;
295 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 243 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
296 qla2x00_get_async_timeout(vha) + 2);
297 if (!sp) 244 if (!sp)
298 goto done; 245 goto done;
299 246
300 ctx = sp->ctx; 247 sp->type = SRB_ADISC_CMD;
301 ctx->type = SRB_ADISC_CMD; 248 sp->name = "adisc";
302 ctx->name = "adisc"; 249 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
303 lio = ctx->u.iocb_cmd; 250
251 lio = &sp->u.iocb_cmd;
304 lio->timeout = qla2x00_async_iocb_timeout; 252 lio->timeout = qla2x00_async_iocb_timeout;
305 lio->done = qla2x00_async_adisc_ctx_done; 253 sp->done = qla2x00_async_adisc_sp_done;
306 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 254 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
307 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 255 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
308 rval = qla2x00_start_sp(sp); 256 rval = qla2x00_start_sp(sp);
@@ -316,46 +264,62 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
316 return rval; 264 return rval;
317 265
318done_free_sp: 266done_free_sp:
319 lio->free(sp); 267 sp->free(fcport->vha, sp);
320done: 268done:
321 return rval; 269 return rval;
322} 270}
323 271
324static void 272static void
325qla2x00_async_tm_cmd_ctx_done(srb_t *sp) 273qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
326{ 274{
327 struct srb_ctx *ctx = sp->ctx; 275 srb_t *sp = (srb_t *)ptr;
328 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd; 276 struct srb_iocb *iocb = &sp->u.iocb_cmd;
277 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
278 uint32_t flags;
279 uint16_t lun;
280 int rval;
329 281
330 qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb); 282 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
331 iocb->free(sp); 283 flags = iocb->u.tmf.flags;
284 lun = (uint16_t)iocb->u.tmf.lun;
285
286 /* Issue Marker IOCB */
287 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
288 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
289 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
290
291 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
292 ql_dbg(ql_dbg_taskm, vha, 0x8030,
293 "TM IOCB failed (%x).\n", rval);
294 }
295 }
296 sp->free(sp->fcport->vha, sp);
332} 297}
333 298
334int 299int
335qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, 300qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
336 uint32_t tag) 301 uint32_t tag)
337{ 302{
338 struct scsi_qla_host *vha = fcport->vha; 303 struct scsi_qla_host *vha = fcport->vha;
339 srb_t *sp; 304 srb_t *sp;
340 struct srb_ctx *ctx;
341 struct srb_iocb *tcf; 305 struct srb_iocb *tcf;
342 int rval; 306 int rval;
343 307
344 rval = QLA_FUNCTION_FAILED; 308 rval = QLA_FUNCTION_FAILED;
345 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 309 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
346 qla2x00_get_async_timeout(vha) + 2);
347 if (!sp) 310 if (!sp)
348 goto done; 311 goto done;
349 312
350 ctx = sp->ctx; 313 sp->type = SRB_TM_CMD;
351 ctx->type = SRB_TM_CMD; 314 sp->name = "tmf";
352 ctx->name = "tmf"; 315 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
353 tcf = ctx->u.iocb_cmd; 316
354 tcf->u.tmf.flags = flags; 317 tcf = &sp->u.iocb_cmd;
318 tcf->u.tmf.flags = tm_flags;
355 tcf->u.tmf.lun = lun; 319 tcf->u.tmf.lun = lun;
356 tcf->u.tmf.data = tag; 320 tcf->u.tmf.data = tag;
357 tcf->timeout = qla2x00_async_iocb_timeout; 321 tcf->timeout = qla2x00_async_iocb_timeout;
358 tcf->done = qla2x00_async_tm_cmd_ctx_done; 322 sp->done = qla2x00_async_tm_cmd_done;
359 323
360 rval = qla2x00_start_sp(sp); 324 rval = qla2x00_start_sp(sp);
361 if (rval != QLA_SUCCESS) 325 if (rval != QLA_SUCCESS)
@@ -368,7 +332,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
368 return rval; 332 return rval;
369 333
370done_free_sp: 334done_free_sp:
371 tcf->free(sp); 335 sp->free(fcport->vha, sp);
372done: 336done:
373 return rval; 337 return rval;
374} 338}
@@ -452,30 +416,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
452 return; 416 return;
453} 417}
454 418
455void
456qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
457 struct srb_iocb *iocb)
458{
459 int rval;
460 uint32_t flags;
461 uint16_t lun;
462
463 flags = iocb->u.tmf.flags;
464 lun = (uint16_t)iocb->u.tmf.lun;
465
466 /* Issue Marker IOCB */
467 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
468 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
469 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
470
471 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
472 ql_dbg(ql_dbg_taskm, vha, 0x8030,
473 "TM IOCB failed (%x).\n", rval);
474 }
475
476 return;
477}
478
479/****************************************************************************/ 419/****************************************************************************/
480/* QLogic ISP2x00 Hardware Support Functions. */ 420/* QLogic ISP2x00 Hardware Support Functions. */
481/****************************************************************************/ 421/****************************************************************************/
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 7cc4f36cd539..3ea0cedc6e77 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -72,16 +72,19 @@ static inline void
72qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) 72qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
73{ 73{
74 struct dsd_dma *dsd_ptr, *tdsd_ptr; 74 struct dsd_dma *dsd_ptr, *tdsd_ptr;
75 struct crc_context *ctx;
76
77 ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
75 78
76 /* clean up allocated prev pool */ 79 /* clean up allocated prev pool */
77 list_for_each_entry_safe(dsd_ptr, tdsd_ptr, 80 list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
78 &((struct crc_context *)sp->ctx)->dsd_list, list) { 81 &ctx->dsd_list, list) {
79 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, 82 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
80 dsd_ptr->dsd_list_dma); 83 dsd_ptr->dsd_list_dma);
81 list_del(&dsd_ptr->list); 84 list_del(&dsd_ptr->list);
82 kfree(dsd_ptr); 85 kfree(dsd_ptr);
83 } 86 }
84 INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list); 87 INIT_LIST_HEAD(&ctx->dsd_list);
85} 88}
86 89
87static inline void 90static inline void
@@ -113,8 +116,7 @@ qla2x00_hba_err_chk_enabled(srb_t *sp)
113 return 0; 116 return 0;
114 * 117 *
115 */ 118 */
116 119 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
117 switch (scsi_get_prot_op(sp->cmd)) {
118 case SCSI_PROT_READ_STRIP: 120 case SCSI_PROT_READ_STRIP:
119 case SCSI_PROT_WRITE_INSERT: 121 case SCSI_PROT_WRITE_INSERT:
120 if (ql2xenablehba_err_chk >= 1) 122 if (ql2xenablehba_err_chk >= 1)
@@ -144,3 +146,38 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
144 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 146 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
145 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 147 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
146} 148}
149
150static inline srb_t *
151qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
152{
153 srb_t *sp = NULL;
154 struct qla_hw_data *ha = vha->hw;
155 uint8_t bail;
156
157 QLA_VHA_MARK_BUSY(vha, bail);
158 if (unlikely(bail))
159 return NULL;
160
161 sp = mempool_alloc(ha->srb_mempool, flag);
162 if (!sp)
163 goto done;
164
165 memset(sp, 0, sizeof(*sp));
166 sp->fcport = fcport;
167 sp->iocbs = 1;
168done:
169 if (!sp)
170 QLA_VHA_MARK_NOT_BUSY(vha);
171 return sp;
172}
173
174static inline void
175qla2x00_init_timer(srb_t *sp, unsigned long tmo)
176{
177 init_timer(&sp->u.iocb_cmd.timer);
178 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
179 sp->u.iocb_cmd.timer.data = (unsigned long)sp;
180 sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
181 add_timer(&sp->u.iocb_cmd.timer);
182 sp->free = qla2x00_sp_free;
183}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a281b5422df1..8ce810373b52 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -22,18 +22,19 @@ static inline uint16_t
22qla2x00_get_cmd_direction(srb_t *sp) 22qla2x00_get_cmd_direction(srb_t *sp)
23{ 23{
24 uint16_t cflags; 24 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25 26
26 cflags = 0; 27 cflags = 0;
27 28
28 /* Set transfer direction */ 29 /* Set transfer direction */
29 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) { 30 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
30 cflags = CF_WRITE; 31 cflags = CF_WRITE;
31 sp->fcport->vha->hw->qla_stats.output_bytes += 32 sp->fcport->vha->hw->qla_stats.output_bytes +=
32 scsi_bufflen(sp->cmd); 33 scsi_bufflen(cmd);
33 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) { 34 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
34 cflags = CF_READ; 35 cflags = CF_READ;
35 sp->fcport->vha->hw->qla_stats.input_bytes += 36 sp->fcport->vha->hw->qla_stats.input_bytes +=
36 scsi_bufflen(sp->cmd); 37 scsi_bufflen(cmd);
37 } 38 }
38 return (cflags); 39 return (cflags);
39} 40}
@@ -143,12 +144,13 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
143static inline int 144static inline int
144qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) 145qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
145{ 146{
146 uint8_t guard = scsi_host_get_guard(sp->cmd->device->host); 147 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 uint8_t guard = scsi_host_get_guard(cmd->device->host);
147 149
148 /* We only support T10 DIF right now */ 150 /* We only support T10 DIF right now */
149 if (guard != SHOST_DIX_GUARD_CRC) { 151 if (guard != SHOST_DIX_GUARD_CRC) {
150 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007, 152 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
151 "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd); 153 "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
152 return 0; 154 return 0;
153 } 155 }
154 156
@@ -156,7 +158,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
156 *fw_prot_opts = 0; 158 *fw_prot_opts = 0;
157 159
158 /* Translate SCSI opcode to a protection opcode */ 160 /* Translate SCSI opcode to a protection opcode */
159 switch (scsi_get_prot_op(sp->cmd)) { 161 switch (scsi_get_prot_op(cmd)) {
160 case SCSI_PROT_READ_STRIP: 162 case SCSI_PROT_READ_STRIP:
161 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 163 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
162 break; 164 break;
@@ -180,7 +182,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
180 break; 182 break;
181 } 183 }
182 184
183 return scsi_prot_sg_count(sp->cmd); 185 return scsi_prot_sg_count(cmd);
184} 186}
185 187
186/* 188/*
@@ -201,7 +203,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
201 struct scatterlist *sg; 203 struct scatterlist *sg;
202 int i; 204 int i;
203 205
204 cmd = sp->cmd; 206 cmd = GET_CMD_SP(sp);
205 207
206 /* Update entry type to indicate Command Type 2 IOCB */ 208 /* Update entry type to indicate Command Type 2 IOCB */
207 *((uint32_t *)(&cmd_pkt->entry_type)) = 209 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -259,7 +261,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
259 struct scatterlist *sg; 261 struct scatterlist *sg;
260 int i; 262 int i;
261 263
262 cmd = sp->cmd; 264 cmd = GET_CMD_SP(sp);
263 265
264 /* Update entry type to indicate Command Type 3 IOCB */ 266 /* Update entry type to indicate Command Type 3 IOCB */
265 *((uint32_t *)(&cmd_pkt->entry_type)) = 267 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -333,7 +335,7 @@ qla2x00_start_scsi(srb_t *sp)
333 vha = sp->fcport->vha; 335 vha = sp->fcport->vha;
334 ha = vha->hw; 336 ha = vha->hw;
335 reg = &ha->iobase->isp; 337 reg = &ha->iobase->isp;
336 cmd = sp->cmd; 338 cmd = GET_CMD_SP(sp);
337 req = ha->req_q_map[0]; 339 req = ha->req_q_map[0];
338 rsp = ha->rsp_q_map[0]; 340 rsp = ha->rsp_q_map[0];
339 /* So we know we haven't pci_map'ed anything yet */ 341 /* So we know we haven't pci_map'ed anything yet */
@@ -391,7 +393,7 @@ qla2x00_start_scsi(srb_t *sp)
391 req->current_outstanding_cmd = handle; 393 req->current_outstanding_cmd = handle;
392 req->outstanding_cmds[handle] = sp; 394 req->outstanding_cmds[handle] = sp;
393 sp->handle = handle; 395 sp->handle = handle;
394 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 396 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
395 req->cnt -= req_cnt; 397 req->cnt -= req_cnt;
396 398
397 cmd_pkt = (cmd_entry_t *)req->ring_ptr; 399 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
@@ -403,7 +405,7 @@ qla2x00_start_scsi(srb_t *sp)
403 405
404 /* Set target ID and LUN number*/ 406 /* Set target ID and LUN number*/
405 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 407 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
406 cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun); 408 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
407 409
408 /* Update tagged queuing modifier */ 410 /* Update tagged queuing modifier */
409 if (scsi_populate_tag_msg(cmd, tag)) { 411 if (scsi_populate_tag_msg(cmd, tag)) {
@@ -608,7 +610,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
608 struct dsd_dma *dsd_ptr; 610 struct dsd_dma *dsd_ptr;
609 struct ct6_dsd *ctx; 611 struct ct6_dsd *ctx;
610 612
611 cmd = sp->cmd; 613 cmd = GET_CMD_SP(sp);
612 614
613 /* Update entry type to indicate Command Type 3 IOCB */ 615 /* Update entry type to indicate Command Type 3 IOCB */
614 *((uint32_t *)(&cmd_pkt->entry_type)) = 616 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -635,7 +637,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
635 } 637 }
636 638
637 cur_seg = scsi_sglist(cmd); 639 cur_seg = scsi_sglist(cmd);
638 ctx = sp->ctx; 640 ctx = GET_CMD_CTX_SP(sp);
639 641
640 while (tot_dsds) { 642 while (tot_dsds) {
641 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? 643 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -724,7 +726,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
724 int i; 726 int i;
725 struct req_que *req; 727 struct req_que *req;
726 728
727 cmd = sp->cmd; 729 cmd = GET_CMD_SP(sp);
728 730
729 /* Update entry type to indicate Command Type 3 IOCB */ 731 /* Update entry type to indicate Command Type 3 IOCB */
730 *((uint32_t *)(&cmd_pkt->entry_type)) = 732 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -744,12 +746,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
744 cmd_pkt->task_mgmt_flags = 746 cmd_pkt->task_mgmt_flags =
745 __constant_cpu_to_le16(TMF_WRITE_DATA); 747 __constant_cpu_to_le16(TMF_WRITE_DATA);
746 sp->fcport->vha->hw->qla_stats.output_bytes += 748 sp->fcport->vha->hw->qla_stats.output_bytes +=
747 scsi_bufflen(sp->cmd); 749 scsi_bufflen(cmd);
748 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 750 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
749 cmd_pkt->task_mgmt_flags = 751 cmd_pkt->task_mgmt_flags =
750 __constant_cpu_to_le16(TMF_READ_DATA); 752 __constant_cpu_to_le16(TMF_READ_DATA);
751 sp->fcport->vha->hw->qla_stats.input_bytes += 753 sp->fcport->vha->hw->qla_stats.input_bytes +=
752 scsi_bufflen(sp->cmd); 754 scsi_bufflen(cmd);
753 } 755 }
754 756
755 /* One DSD is available in the Command Type 3 IOCB */ 757 /* One DSD is available in the Command Type 3 IOCB */
@@ -796,7 +798,7 @@ static inline void
796qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, 798qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
797 unsigned int protcnt) 799 unsigned int protcnt)
798{ 800{
799 struct scsi_cmnd *cmd = sp->cmd; 801 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
800 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 802 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
801 803
802 switch (scsi_get_prot_type(cmd)) { 804 switch (scsi_get_prot_type(cmd)) {
@@ -951,16 +953,16 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
951 struct qla2_sgx sgx; 953 struct qla2_sgx sgx;
952 dma_addr_t sle_dma; 954 dma_addr_t sle_dma;
953 uint32_t sle_dma_len, tot_prot_dma_len = 0; 955 uint32_t sle_dma_len, tot_prot_dma_len = 0;
954 struct scsi_cmnd *cmd = sp->cmd; 956 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
955 957
956 prot_int = cmd->device->sector_size; 958 prot_int = cmd->device->sector_size;
957 959
958 memset(&sgx, 0, sizeof(struct qla2_sgx)); 960 memset(&sgx, 0, sizeof(struct qla2_sgx));
959 sgx.tot_bytes = scsi_bufflen(sp->cmd); 961 sgx.tot_bytes = scsi_bufflen(cmd);
960 sgx.cur_sg = scsi_sglist(sp->cmd); 962 sgx.cur_sg = scsi_sglist(cmd);
961 sgx.sp = sp; 963 sgx.sp = sp;
962 964
963 sg_prot = scsi_prot_sglist(sp->cmd); 965 sg_prot = scsi_prot_sglist(cmd);
964 966
965 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 967 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
966 968
@@ -994,7 +996,7 @@ alloc_and_fill:
994 } 996 }
995 997
996 list_add_tail(&dsd_ptr->list, 998 list_add_tail(&dsd_ptr->list,
997 &((struct crc_context *)sp->ctx)->dsd_list); 999 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
998 1000
999 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1001 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1000 1002
@@ -1043,11 +1045,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1043 uint32_t *cur_dsd = dsd; 1045 uint32_t *cur_dsd = dsd;
1044 int i; 1046 int i;
1045 uint16_t used_dsds = tot_dsds; 1047 uint16_t used_dsds = tot_dsds;
1046 scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host); 1048 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1049 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1047 1050
1048 uint8_t *cp; 1051 uint8_t *cp;
1049 1052
1050 scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) { 1053 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1051 dma_addr_t sle_dma; 1054 dma_addr_t sle_dma;
1052 1055
1053 /* Allocate additional continuation packets? */ 1056 /* Allocate additional continuation packets? */
@@ -1077,7 +1080,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1077 } 1080 }
1078 1081
1079 list_add_tail(&dsd_ptr->list, 1082 list_add_tail(&dsd_ptr->list,
1080 &((struct crc_context *)sp->ctx)->dsd_list); 1083 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1081 1084
1082 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1085 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1083 1086
@@ -1090,17 +1093,16 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1090 sle_dma = sg_dma_address(sg); 1093 sle_dma = sg_dma_address(sg);
1091 ql_dbg(ql_dbg_io, vha, 0x300a, 1094 ql_dbg(ql_dbg_io, vha, 0x300a,
1092 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n", 1095 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1093 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), 1096 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
1094 sp->cmd);
1095 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1097 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1096 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1098 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1097 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1099 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1098 avail_dsds--; 1100 avail_dsds--;
1099 1101
1100 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 1102 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1101 cp = page_address(sg_page(sg)) + sg->offset; 1103 cp = page_address(sg_page(sg)) + sg->offset;
1102 ql_dbg(ql_dbg_io, vha, 0x300b, 1104 ql_dbg(ql_dbg_io, vha, 0x300b,
1103 "User data buffer=%p for cmd=%p.\n", cp, sp->cmd); 1105 "User data buffer=%p for cmd=%p.\n", cp, cmd);
1104 } 1106 }
1105 } 1107 }
1106 /* Null termination */ 1108 /* Null termination */
@@ -1127,8 +1129,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1127 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 1129 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1128 uint8_t *cp; 1130 uint8_t *cp;
1129 1131
1130 1132 cmd = GET_CMD_SP(sp);
1131 cmd = sp->cmd;
1132 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { 1133 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1133 dma_addr_t sle_dma; 1134 dma_addr_t sle_dma;
1134 1135
@@ -1159,7 +1160,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1159 } 1160 }
1160 1161
1161 list_add_tail(&dsd_ptr->list, 1162 list_add_tail(&dsd_ptr->list,
1162 &((struct crc_context *)sp->ctx)->dsd_list); 1163 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1163 1164
1164 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1165 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1165 1166
@@ -1170,7 +1171,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1170 cur_dsd = (uint32_t *)next_dsd; 1171 cur_dsd = (uint32_t *)next_dsd;
1171 } 1172 }
1172 sle_dma = sg_dma_address(sg); 1173 sle_dma = sg_dma_address(sg);
1173 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 1174 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1174 ql_dbg(ql_dbg_io, vha, 0x3027, 1175 ql_dbg(ql_dbg_io, vha, 0x3027,
1175 "%s(): %p, sg_entry %d - " 1176 "%s(): %p, sg_entry %d - "
1176 "addr=0x%x0x%x, len=%d.\n", 1177 "addr=0x%x0x%x, len=%d.\n",
@@ -1181,7 +1182,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1181 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1182 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1182 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1183 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1183 1184
1184 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 1185 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1185 cp = page_address(sg_page(sg)) + sg->offset; 1186 cp = page_address(sg_page(sg)) + sg->offset;
1186 ql_dbg(ql_dbg_io, vha, 0x3028, 1187 ql_dbg(ql_dbg_io, vha, 0x3028,
1187 "%s(): Protection Data buffer = %p.\n", __func__, 1188 "%s(): Protection Data buffer = %p.\n", __func__,
@@ -1227,7 +1228,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1227 dma_addr_t crc_ctx_dma; 1228 dma_addr_t crc_ctx_dma;
1228 char tag[2]; 1229 char tag[2];
1229 1230
1230 cmd = sp->cmd; 1231 cmd = GET_CMD_SP(sp);
1231 1232
1232 sgc = 0; 1233 sgc = 0;
1233 /* Update entry type to indicate Command Type CRC_2 IOCB */ 1234 /* Update entry type to indicate Command Type CRC_2 IOCB */
@@ -1255,15 +1256,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1255 __constant_cpu_to_le16(CF_READ_DATA); 1256 __constant_cpu_to_le16(CF_READ_DATA);
1256 } 1257 }
1257 1258
1258 if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || 1259 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1259 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || 1260 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1260 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || 1261 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1261 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) 1262 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1262 bundling = 0; 1263 bundling = 0;
1263 1264
1264 /* Allocate CRC context from global pool */ 1265 /* Allocate CRC context from global pool */
1265 crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool, 1266 crc_ctx_pkt = sp->u.scmd.ctx =
1266 GFP_ATOMIC, &crc_ctx_dma); 1267 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1267 1268
1268 if (!crc_ctx_pkt) 1269 if (!crc_ctx_pkt)
1269 goto crc_queuing_error; 1270 goto crc_queuing_error;
@@ -1309,7 +1310,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1309 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1310 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1310 fcp_cmnd->additional_cdb_len |= 2; 1311 fcp_cmnd->additional_cdb_len |= 2;
1311 1312
1312 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); 1313 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1313 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1314 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1314 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1315 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1315 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( 1316 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1344,7 +1345,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1344 blk_size = cmd->device->sector_size; 1345 blk_size = cmd->device->sector_size;
1345 dif_bytes = (data_bytes / blk_size) * 8; 1346 dif_bytes = (data_bytes / blk_size) * 8;
1346 1347
1347 switch (scsi_get_prot_op(sp->cmd)) { 1348 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1348 case SCSI_PROT_READ_INSERT: 1349 case SCSI_PROT_READ_INSERT:
1349 case SCSI_PROT_WRITE_STRIP: 1350 case SCSI_PROT_WRITE_STRIP:
1350 total_bytes = data_bytes; 1351 total_bytes = data_bytes;
@@ -1444,7 +1445,7 @@ qla24xx_start_scsi(srb_t *sp)
1444 uint16_t tot_dsds; 1445 uint16_t tot_dsds;
1445 struct req_que *req = NULL; 1446 struct req_que *req = NULL;
1446 struct rsp_que *rsp = NULL; 1447 struct rsp_que *rsp = NULL;
1447 struct scsi_cmnd *cmd = sp->cmd; 1448 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1448 struct scsi_qla_host *vha = sp->fcport->vha; 1449 struct scsi_qla_host *vha = sp->fcport->vha;
1449 struct qla_hw_data *ha = vha->hw; 1450 struct qla_hw_data *ha = vha->hw;
1450 char tag[2]; 1451 char tag[2];
@@ -1509,7 +1510,7 @@ qla24xx_start_scsi(srb_t *sp)
1509 req->current_outstanding_cmd = handle; 1510 req->current_outstanding_cmd = handle;
1510 req->outstanding_cmds[handle] = sp; 1511 req->outstanding_cmds[handle] = sp;
1511 sp->handle = handle; 1512 sp->handle = handle;
1512 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1513 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1513 req->cnt -= req_cnt; 1514 req->cnt -= req_cnt;
1514 1515
1515 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1516 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
@@ -1528,7 +1529,7 @@ qla24xx_start_scsi(srb_t *sp)
1528 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1529 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1529 cmd_pkt->vp_index = sp->fcport->vp_idx; 1530 cmd_pkt->vp_index = sp->fcport->vp_idx;
1530 1531
1531 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 1532 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1532 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1533 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1533 1534
1534 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1535 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1610,7 +1611,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1610 uint16_t fw_prot_opts = 0; 1611 uint16_t fw_prot_opts = 0;
1611 struct req_que *req = NULL; 1612 struct req_que *req = NULL;
1612 struct rsp_que *rsp = NULL; 1613 struct rsp_que *rsp = NULL;
1613 struct scsi_cmnd *cmd = sp->cmd; 1614 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1614 struct scsi_qla_host *vha = sp->fcport->vha; 1615 struct scsi_qla_host *vha = sp->fcport->vha;
1615 struct qla_hw_data *ha = vha->hw; 1616 struct qla_hw_data *ha = vha->hw;
1616 struct cmd_type_crc_2 *cmd_pkt; 1617 struct cmd_type_crc_2 *cmd_pkt;
@@ -1727,7 +1728,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1727 req->current_outstanding_cmd = handle; 1728 req->current_outstanding_cmd = handle;
1728 req->outstanding_cmds[handle] = sp; 1729 req->outstanding_cmds[handle] = sp;
1729 sp->handle = handle; 1730 sp->handle = handle;
1730 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1731 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1731 req->cnt -= req_cnt; 1732 req->cnt -= req_cnt;
1732 1733
1733 /* Fill-in common area */ 1734 /* Fill-in common area */
@@ -1743,7 +1744,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1743 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1744 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1744 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1745 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1745 1746
1746 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 1747 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1747 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1748 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1748 1749
1749 /* Total Data and protection segment(s) */ 1750 /* Total Data and protection segment(s) */
@@ -1796,7 +1797,7 @@ queuing_error:
1796 1797
1797static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) 1798static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1798{ 1799{
1799 struct scsi_cmnd *cmd = sp->cmd; 1800 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1800 struct qla_hw_data *ha = sp->fcport->vha->hw; 1801 struct qla_hw_data *ha = sp->fcport->vha->hw;
1801 int affinity = cmd->request->cpu; 1802 int affinity = cmd->request->cpu;
1802 1803
@@ -1817,7 +1818,6 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1817 uint32_t index, handle; 1818 uint32_t index, handle;
1818 request_t *pkt; 1819 request_t *pkt;
1819 uint16_t cnt, req_cnt; 1820 uint16_t cnt, req_cnt;
1820 struct srb_ctx *ctx;
1821 1821
1822 pkt = NULL; 1822 pkt = NULL;
1823 req_cnt = 1; 1823 req_cnt = 1;
@@ -1847,10 +1847,8 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1847 sp->handle = handle; 1847 sp->handle = handle;
1848 1848
1849 /* Adjust entry-counts as needed. */ 1849 /* Adjust entry-counts as needed. */
1850 if (sp->ctx) { 1850 if (sp->type != SRB_SCSI_CMD)
1851 ctx = sp->ctx; 1851 req_cnt = sp->iocbs;
1852 req_cnt = ctx->iocbs;
1853 }
1854 1852
1855skip_cmd_array: 1853skip_cmd_array:
1856 /* Check for room on request queue. */ 1854 /* Check for room on request queue. */
@@ -1888,8 +1886,7 @@ queuing_error:
1888static void 1886static void
1889qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 1887qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1890{ 1888{
1891 struct srb_ctx *ctx = sp->ctx; 1889 struct srb_iocb *lio = &sp->u.iocb_cmd;
1892 struct srb_iocb *lio = ctx->u.iocb_cmd;
1893 1890
1894 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1891 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1895 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 1892 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
@@ -1908,8 +1905,7 @@ static void
1908qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) 1905qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1909{ 1906{
1910 struct qla_hw_data *ha = sp->fcport->vha->hw; 1907 struct qla_hw_data *ha = sp->fcport->vha->hw;
1911 struct srb_ctx *ctx = sp->ctx; 1908 struct srb_iocb *lio = &sp->u.iocb_cmd;
1912 struct srb_iocb *lio = ctx->u.iocb_cmd;
1913 uint16_t opts; 1909 uint16_t opts;
1914 1910
1915 mbx->entry_type = MBX_IOCB_TYPE; 1911 mbx->entry_type = MBX_IOCB_TYPE;
@@ -1998,8 +1994,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1998 struct fc_port *fcport = sp->fcport; 1994 struct fc_port *fcport = sp->fcport;
1999 scsi_qla_host_t *vha = fcport->vha; 1995 scsi_qla_host_t *vha = fcport->vha;
2000 struct qla_hw_data *ha = vha->hw; 1996 struct qla_hw_data *ha = vha->hw;
2001 struct srb_ctx *ctx = sp->ctx; 1997 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2002 struct srb_iocb *iocb = ctx->u.iocb_cmd;
2003 struct req_que *req = vha->req; 1998 struct req_que *req = vha->req;
2004 1999
2005 flags = iocb->u.tmf.flags; 2000 flags = iocb->u.tmf.flags;
@@ -2026,7 +2021,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2026static void 2021static void
2027qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2022qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2028{ 2023{
2029 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 2024 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2030 2025
2031 els_iocb->entry_type = ELS_IOCB_TYPE; 2026 els_iocb->entry_type = ELS_IOCB_TYPE;
2032 els_iocb->entry_count = 1; 2027 els_iocb->entry_count = 1;
@@ -2040,7 +2035,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2040 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2035 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2041 2036
2042 els_iocb->opcode = 2037 els_iocb->opcode =
2043 (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ? 2038 sp->type == SRB_ELS_CMD_RPT ?
2044 bsg_job->request->rqst_data.r_els.els_code : 2039 bsg_job->request->rqst_data.r_els.els_code :
2045 bsg_job->request->rqst_data.h_els.command_code; 2040 bsg_job->request->rqst_data.h_els.command_code;
2046 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2041 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2077,7 +2072,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2077 uint16_t tot_dsds; 2072 uint16_t tot_dsds;
2078 scsi_qla_host_t *vha = sp->fcport->vha; 2073 scsi_qla_host_t *vha = sp->fcport->vha;
2079 struct qla_hw_data *ha = vha->hw; 2074 struct qla_hw_data *ha = vha->hw;
2080 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 2075 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2081 int loop_iterartion = 0; 2076 int loop_iterartion = 0;
2082 int cont_iocb_prsnt = 0; 2077 int cont_iocb_prsnt = 0;
2083 int entry_count = 1; 2078 int entry_count = 1;
@@ -2154,7 +2149,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2154 uint16_t tot_dsds; 2149 uint16_t tot_dsds;
2155 scsi_qla_host_t *vha = sp->fcport->vha; 2150 scsi_qla_host_t *vha = sp->fcport->vha;
2156 struct qla_hw_data *ha = vha->hw; 2151 struct qla_hw_data *ha = vha->hw;
2157 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 2152 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2158 int loop_iterartion = 0; 2153 int loop_iterartion = 0;
2159 int cont_iocb_prsnt = 0; 2154 int cont_iocb_prsnt = 0;
2160 int entry_count = 1; 2155 int entry_count = 1;
@@ -2244,12 +2239,12 @@ qla82xx_start_scsi(srb_t *sp)
2244 struct qla_hw_data *ha = vha->hw; 2239 struct qla_hw_data *ha = vha->hw;
2245 struct req_que *req = NULL; 2240 struct req_que *req = NULL;
2246 struct rsp_que *rsp = NULL; 2241 struct rsp_que *rsp = NULL;
2247 char tag[2]; 2242 char tag[2];
2248 2243
2249 /* Setup device pointers. */ 2244 /* Setup device pointers. */
2250 ret = 0; 2245 ret = 0;
2251 reg = &ha->iobase->isp82; 2246 reg = &ha->iobase->isp82;
2252 cmd = sp->cmd; 2247 cmd = GET_CMD_SP(sp);
2253 req = vha->req; 2248 req = vha->req;
2254 rsp = ha->rsp_q_map[0]; 2249 rsp = ha->rsp_q_map[0];
2255 2250
@@ -2353,12 +2348,14 @@ sufficient_dsds:
2353 if (req->cnt < (req_cnt + 2)) 2348 if (req->cnt < (req_cnt + 2))
2354 goto queuing_error; 2349 goto queuing_error;
2355 2350
2356 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2351 ctx = sp->u.scmd.ctx =
2357 if (!sp->ctx) { 2352 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2353 if (!ctx) {
2358 ql_log(ql_log_fatal, vha, 0x3010, 2354 ql_log(ql_log_fatal, vha, 0x3010,
2359 "Failed to allocate ctx for cmd=%p.\n", cmd); 2355 "Failed to allocate ctx for cmd=%p.\n", cmd);
2360 goto queuing_error; 2356 goto queuing_error;
2361 } 2357 }
2358
2362 memset(ctx, 0, sizeof(struct ct6_dsd)); 2359 memset(ctx, 0, sizeof(struct ct6_dsd));
2363 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2360 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2364 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2361 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
@@ -2409,12 +2406,12 @@ sufficient_dsds:
2409 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2406 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2410 goto queuing_error_fcp_cmnd; 2407 goto queuing_error_fcp_cmnd;
2411 2408
2412 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2409 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2413 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2410 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2414 2411
2415 /* build FCP_CMND IU */ 2412 /* build FCP_CMND IU */
2416 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2413 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2417 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2414 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2418 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2415 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2419 2416
2420 if (cmd->sc_data_direction == DMA_TO_DEVICE) 2417 if (cmd->sc_data_direction == DMA_TO_DEVICE)
@@ -2494,9 +2491,9 @@ sufficient_dsds:
2494 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2491 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2495 cmd_pkt->vp_index = sp->fcport->vp_idx; 2492 cmd_pkt->vp_index = sp->fcport->vp_idx;
2496 2493
2497 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2494 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2498 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2495 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2499 sizeof(cmd_pkt->lun)); 2496 sizeof(cmd_pkt->lun));
2500 2497
2501 /* 2498 /*
2502 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2499 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
@@ -2537,7 +2534,7 @@ sufficient_dsds:
2537 req->current_outstanding_cmd = handle; 2534 req->current_outstanding_cmd = handle;
2538 req->outstanding_cmds[handle] = sp; 2535 req->outstanding_cmds[handle] = sp;
2539 sp->handle = handle; 2536 sp->handle = handle;
2540 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2537 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2541 req->cnt -= req_cnt; 2538 req->cnt -= req_cnt;
2542 wmb(); 2539 wmb();
2543 2540
@@ -2583,9 +2580,9 @@ queuing_error:
2583 if (tot_dsds) 2580 if (tot_dsds)
2584 scsi_dma_unmap(cmd); 2581 scsi_dma_unmap(cmd);
2585 2582
2586 if (sp->ctx) { 2583 if (sp->u.scmd.ctx) {
2587 mempool_free(sp->ctx, ha->ctx_mempool); 2584 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2588 sp->ctx = NULL; 2585 sp->u.scmd.ctx = NULL;
2589 } 2586 }
2590 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2587 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2591 2588
@@ -2598,7 +2595,6 @@ qla2x00_start_sp(srb_t *sp)
2598 int rval; 2595 int rval;
2599 struct qla_hw_data *ha = sp->fcport->vha->hw; 2596 struct qla_hw_data *ha = sp->fcport->vha->hw;
2600 void *pkt; 2597 void *pkt;
2601 struct srb_ctx *ctx = sp->ctx;
2602 unsigned long flags; 2598 unsigned long flags;
2603 2599
2604 rval = QLA_FUNCTION_FAILED; 2600 rval = QLA_FUNCTION_FAILED;
@@ -2611,7 +2607,7 @@ qla2x00_start_sp(srb_t *sp)
2611 } 2607 }
2612 2608
2613 rval = QLA_SUCCESS; 2609 rval = QLA_SUCCESS;
2614 switch (ctx->type) { 2610 switch (sp->type) {
2615 case SRB_LOGIN_CMD: 2611 case SRB_LOGIN_CMD:
2616 IS_FWI2_CAPABLE(ha) ? 2612 IS_FWI2_CAPABLE(ha) ?
2617 qla24xx_login_iocb(sp, pkt) : 2613 qla24xx_login_iocb(sp, pkt) :
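For orientation before the qla_isr.c hunks: the qla_iocb.c changes above all lean on the reworked srb_t, in which the old external srb_ctx is folded into the SRB itself. The sketch below is reconstructed purely from the field accesses visible in this diff (sp->type, sp->name, sp->iocbs, the sp->u union, sp->done/sp->free); the authoritative layout is the one this series puts in qla_def.h, and the member order and exact types here are approximations that assume the usual driver and kernel headers.

/*
 * Approximate shape of the consolidated SRB; reconstructed from usage in
 * this diff, not copied from qla_def.h.  Assumes the normal qla2xxx and
 * kernel headers for fc_port, srb_iocb, srb_cmd, fc_bsg_job, atomic_t.
 */
typedef struct srb {
        atomic_t ref_count;             /* completion vs. abort reference */
        struct fc_port *fcport;
        uint32_t handle;                /* outstanding_cmds[] slot */
        uint16_t flags;                 /* SRB_DMA_VALID, SRB_CRC_*_VALID, ... */
        uint8_t type;                   /* SRB_SCSI_CMD, SRB_LOGIN_CMD, SRB_CT_CMD, ... */
        const char *name;               /* human-readable type, used in logging */
        int iocbs;                      /* IOCB slots this request consumes */
        union {
                struct srb_iocb iocb_cmd;       /* login/logout/TM/ADISC */
                struct fc_bsg_job *bsg_job;     /* ELS/CT pass-through */
                struct srb_cmd scmd;            /* FCP command: .cmd, .ctx, sense bookkeeping */
        } u;
        void (*done)(void *vha_or_ha, void *sp, int res);   /* completion callback */
        void (*free)(void *vha_or_ha, void *sp);            /* resource teardown */
} srb_t;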
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 18e7d961aa09..87f2611c3803 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -853,8 +853,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
853 req->outstanding_cmds[index] = NULL; 853 req->outstanding_cmds[index] = NULL;
854 854
855 /* Save ISP completion status */ 855 /* Save ISP completion status */
856 sp->cmd->result = DID_OK << 16; 856 sp->done(ha, sp, DID_OK << 16);
857 qla2x00_sp_compl(ha, sp);
858 } else { 857 } else {
859 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 858 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
860 859
@@ -911,7 +910,6 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
911 fc_port_t *fcport; 910 fc_port_t *fcport;
912 srb_t *sp; 911 srb_t *sp;
913 struct srb_iocb *lio; 912 struct srb_iocb *lio;
914 struct srb_ctx *ctx;
915 uint16_t *data; 913 uint16_t *data;
916 uint16_t status; 914 uint16_t status;
917 915
@@ -919,9 +917,8 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
919 if (!sp) 917 if (!sp)
920 return; 918 return;
921 919
922 ctx = sp->ctx; 920 lio = &sp->u.iocb_cmd;
923 lio = ctx->u.iocb_cmd; 921 type = sp->name;
924 type = ctx->name;
925 fcport = sp->fcport; 922 fcport = sp->fcport;
926 data = lio->u.logio.data; 923 data = lio->u.logio.data;
927 924
@@ -945,7 +942,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
945 } 942 }
946 943
947 status = le16_to_cpu(mbx->status); 944 status = le16_to_cpu(mbx->status);
948 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD && 945 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
949 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 946 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
950 status = 0; 947 status = 0;
951 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 948 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
@@ -956,7 +953,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
956 le16_to_cpu(mbx->mb1)); 953 le16_to_cpu(mbx->mb1));
957 954
958 data[0] = MBS_COMMAND_COMPLETE; 955 data[0] = MBS_COMMAND_COMPLETE;
959 if (ctx->type == SRB_LOGIN_CMD) { 956 if (sp->type == SRB_LOGIN_CMD) {
960 fcport->port_type = FCT_TARGET; 957 fcport->port_type = FCT_TARGET;
961 if (le16_to_cpu(mbx->mb1) & BIT_0) 958 if (le16_to_cpu(mbx->mb1) & BIT_0)
962 fcport->port_type = FCT_INITIATOR; 959 fcport->port_type = FCT_INITIATOR;
@@ -987,7 +984,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
987 le16_to_cpu(mbx->mb7)); 984 le16_to_cpu(mbx->mb7));
988 985
989logio_done: 986logio_done:
990 lio->done(sp); 987 sp->done(vha, sp, 0);
991} 988}
992 989
993static void 990static void
@@ -996,29 +993,18 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
996{ 993{
997 const char func[] = "CT_IOCB"; 994 const char func[] = "CT_IOCB";
998 const char *type; 995 const char *type;
999 struct qla_hw_data *ha = vha->hw;
1000 srb_t *sp; 996 srb_t *sp;
1001 struct srb_ctx *sp_bsg;
1002 struct fc_bsg_job *bsg_job; 997 struct fc_bsg_job *bsg_job;
1003 uint16_t comp_status; 998 uint16_t comp_status;
999 int res;
1004 1000
1005 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1001 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1006 if (!sp) 1002 if (!sp)
1007 return; 1003 return;
1008 1004
1009 sp_bsg = sp->ctx; 1005 bsg_job = sp->u.bsg_job;
1010 bsg_job = sp_bsg->u.bsg_job;
1011 1006
1012 type = NULL; 1007 type = "ct pass-through";
1013 switch (sp_bsg->type) {
1014 case SRB_CT_CMD:
1015 type = "ct pass-through";
1016 break;
1017 default:
1018 ql_log(ql_log_warn, vha, 0x5047,
1019 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1020 return;
1021 }
1022 1008
1023 comp_status = le16_to_cpu(pkt->comp_status); 1009 comp_status = le16_to_cpu(pkt->comp_status);
1024 1010
@@ -1030,7 +1016,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1030 1016
1031 if (comp_status != CS_COMPLETE) { 1017 if (comp_status != CS_COMPLETE) {
1032 if (comp_status == CS_DATA_UNDERRUN) { 1018 if (comp_status == CS_DATA_UNDERRUN) {
1033 bsg_job->reply->result = DID_OK << 16; 1019 res = DID_OK << 16;
1034 bsg_job->reply->reply_payload_rcv_len = 1020 bsg_job->reply->reply_payload_rcv_len =
1035 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1021 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1036 1022
@@ -1043,30 +1029,19 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1043 ql_log(ql_log_warn, vha, 0x5049, 1029 ql_log(ql_log_warn, vha, 0x5049,
1044 "CT pass-through-%s error " 1030 "CT pass-through-%s error "
1045 "comp_status-status=0x%x.\n", type, comp_status); 1031 "comp_status-status=0x%x.\n", type, comp_status);
1046 bsg_job->reply->result = DID_ERROR << 16; 1032 res = DID_ERROR << 16;
1047 bsg_job->reply->reply_payload_rcv_len = 0; 1033 bsg_job->reply->reply_payload_rcv_len = 0;
1048 } 1034 }
1049 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1035 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1050 (uint8_t *)pkt, sizeof(*pkt)); 1036 (uint8_t *)pkt, sizeof(*pkt));
1051 } else { 1037 } else {
1052 bsg_job->reply->result = DID_OK << 16; 1038 res = DID_OK << 16;
1053 bsg_job->reply->reply_payload_rcv_len = 1039 bsg_job->reply->reply_payload_rcv_len =
1054 bsg_job->reply_payload.payload_len; 1040 bsg_job->reply_payload.payload_len;
1055 bsg_job->reply_len = 0; 1041 bsg_job->reply_len = 0;
1056 } 1042 }
1057 1043
1058 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1044 sp->done(vha, sp, res);
1059 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1060
1061 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1062 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1063
1064 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1065 kfree(sp->fcport);
1066
1067 kfree(sp->ctx);
1068 mempool_free(sp, ha->srb_mempool);
1069 bsg_job->job_done(bsg_job);
1070} 1045}
1071 1046
1072static void 1047static void
@@ -1075,22 +1050,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1075{ 1050{
1076 const char func[] = "ELS_CT_IOCB"; 1051 const char func[] = "ELS_CT_IOCB";
1077 const char *type; 1052 const char *type;
1078 struct qla_hw_data *ha = vha->hw;
1079 srb_t *sp; 1053 srb_t *sp;
1080 struct srb_ctx *sp_bsg;
1081 struct fc_bsg_job *bsg_job; 1054 struct fc_bsg_job *bsg_job;
1082 uint16_t comp_status; 1055 uint16_t comp_status;
1083 uint32_t fw_status[3]; 1056 uint32_t fw_status[3];
1084 uint8_t* fw_sts_ptr; 1057 uint8_t* fw_sts_ptr;
1058 int res;
1085 1059
1086 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1060 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1087 if (!sp) 1061 if (!sp)
1088 return; 1062 return;
1089 sp_bsg = sp->ctx; 1063 bsg_job = sp->u.bsg_job;
1090 bsg_job = sp_bsg->u.bsg_job;
1091 1064
1092 type = NULL; 1065 type = NULL;
1093 switch (sp_bsg->type) { 1066 switch (sp->type) {
1094 case SRB_ELS_CMD_RPT: 1067 case SRB_ELS_CMD_RPT:
1095 case SRB_ELS_CMD_HST: 1068 case SRB_ELS_CMD_HST:
1096 type = "els"; 1069 type = "els";
@@ -1100,7 +1073,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1100 break; 1073 break;
1101 default: 1074 default:
1102 ql_log(ql_log_warn, vha, 0x503e, 1075 ql_log(ql_log_warn, vha, 0x503e,
1103 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1076 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1104 return; 1077 return;
1105 } 1078 }
1106 1079
@@ -1116,9 +1089,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1116 1089
1117 if (comp_status != CS_COMPLETE) { 1090 if (comp_status != CS_COMPLETE) {
1118 if (comp_status == CS_DATA_UNDERRUN) { 1091 if (comp_status == CS_DATA_UNDERRUN) {
1119 bsg_job->reply->result = DID_OK << 16; 1092 res = DID_OK << 16;
1120 bsg_job->reply->reply_payload_rcv_len = 1093 bsg_job->reply->reply_payload_rcv_len =
1121 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1094 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1122 1095
1123 ql_log(ql_log_info, vha, 0x503f, 1096 ql_log(ql_log_info, vha, 0x503f,
1124 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1097 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
@@ -1138,7 +1111,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1138 pkt)->error_subcode_1), 1111 pkt)->error_subcode_1),
1139 le16_to_cpu(((struct els_sts_entry_24xx *) 1112 le16_to_cpu(((struct els_sts_entry_24xx *)
1140 pkt)->error_subcode_2)); 1113 pkt)->error_subcode_2));
1141 bsg_job->reply->result = DID_ERROR << 16; 1114 res = DID_ERROR << 16;
1142 bsg_job->reply->reply_payload_rcv_len = 0; 1115 bsg_job->reply->reply_payload_rcv_len = 0;
1143 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1116 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1144 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1117 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
@@ -1147,23 +1120,12 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1147 (uint8_t *)pkt, sizeof(*pkt)); 1120 (uint8_t *)pkt, sizeof(*pkt));
1148 } 1121 }
1149 else { 1122 else {
1150 bsg_job->reply->result = DID_OK << 16; 1123 res = DID_OK << 16;
1151 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1124 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1152 bsg_job->reply_len = 0; 1125 bsg_job->reply_len = 0;
1153 } 1126 }
1154 1127
1155 dma_unmap_sg(&ha->pdev->dev, 1128 sp->done(vha, sp, res);
1156 bsg_job->request_payload.sg_list,
1157 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1158 dma_unmap_sg(&ha->pdev->dev,
1159 bsg_job->reply_payload.sg_list,
1160 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1161 if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1162 (sp_bsg->type == SRB_CT_CMD))
1163 kfree(sp->fcport);
1164 kfree(sp->ctx);
1165 mempool_free(sp, ha->srb_mempool);
1166 bsg_job->job_done(bsg_job);
1167} 1129}
1168 1130
1169static void 1131static void
@@ -1175,7 +1137,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1175 fc_port_t *fcport; 1137 fc_port_t *fcport;
1176 srb_t *sp; 1138 srb_t *sp;
1177 struct srb_iocb *lio; 1139 struct srb_iocb *lio;
1178 struct srb_ctx *ctx;
1179 uint16_t *data; 1140 uint16_t *data;
1180 uint32_t iop[2]; 1141 uint32_t iop[2];
1181 1142
@@ -1183,9 +1144,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1183 if (!sp) 1144 if (!sp)
1184 return; 1145 return;
1185 1146
1186 ctx = sp->ctx; 1147 lio = &sp->u.iocb_cmd;
1187 lio = ctx->u.iocb_cmd; 1148 type = sp->name;
1188 type = ctx->name;
1189 fcport = sp->fcport; 1149 fcport = sp->fcport;
1190 data = lio->u.logio.data; 1150 data = lio->u.logio.data;
1191 1151
@@ -1213,7 +1173,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1213 le32_to_cpu(logio->io_parameter[0])); 1173 le32_to_cpu(logio->io_parameter[0]));
1214 1174
1215 data[0] = MBS_COMMAND_COMPLETE; 1175 data[0] = MBS_COMMAND_COMPLETE;
1216 if (ctx->type != SRB_LOGIN_CMD) 1176 if (sp->type != SRB_LOGIN_CMD)
1217 goto logio_done; 1177 goto logio_done;
1218 1178
1219 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1179 iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1256,7 +1216,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1256 le32_to_cpu(logio->io_parameter[1])); 1216 le32_to_cpu(logio->io_parameter[1]));
1257 1217
1258logio_done: 1218logio_done:
1259 lio->done(sp); 1219 sp->done(vha, sp, 0);
1260} 1220}
1261 1221
1262static void 1222static void
@@ -1268,7 +1228,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1268 fc_port_t *fcport; 1228 fc_port_t *fcport;
1269 srb_t *sp; 1229 srb_t *sp;
1270 struct srb_iocb *iocb; 1230 struct srb_iocb *iocb;
1271 struct srb_ctx *ctx;
1272 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1231 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1273 int error = 1; 1232 int error = 1;
1274 1233
@@ -1276,9 +1235,8 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1276 if (!sp) 1235 if (!sp)
1277 return; 1236 return;
1278 1237
1279 ctx = sp->ctx; 1238 iocb = &sp->u.iocb_cmd;
1280 iocb = ctx->u.iocb_cmd; 1239 type = sp->name;
1281 type = ctx->name;
1282 fcport = sp->fcport; 1240 fcport = sp->fcport;
1283 1241
1284 if (sts->entry_status) { 1242 if (sts->entry_status) {
@@ -1312,7 +1270,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1312 (uint8_t *)sts, sizeof(*sts)); 1270 (uint8_t *)sts, sizeof(*sts));
1313 } 1271 }
1314 1272
1315 iocb->done(sp); 1273 sp->done(vha, sp, 0);
1316} 1274}
1317 1275
1318/** 1276/**
@@ -1398,25 +1356,32 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1398 1356
1399static inline void 1357static inline void
1400qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1358qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1401 uint32_t sense_len, struct rsp_que *rsp) 1359 uint32_t sense_len, struct rsp_que *rsp, int res)
1402{ 1360{
1403 struct scsi_qla_host *vha = sp->fcport->vha; 1361 struct scsi_qla_host *vha = sp->fcport->vha;
1404 struct scsi_cmnd *cp = sp->cmd; 1362 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1363 uint32_t track_sense_len;
1405 1364
1406 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1365 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1407 sense_len = SCSI_SENSE_BUFFERSIZE; 1366 sense_len = SCSI_SENSE_BUFFERSIZE;
1408 1367
1409 sp->request_sense_length = sense_len; 1368 SET_CMD_SENSE_LEN(sp, sense_len);
1410 sp->request_sense_ptr = cp->sense_buffer; 1369 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1411 if (sp->request_sense_length > par_sense_len) 1370 track_sense_len = sense_len;
1371
1372 if (sense_len > par_sense_len)
1412 sense_len = par_sense_len; 1373 sense_len = par_sense_len;
1413 1374
1414 memcpy(cp->sense_buffer, sense_data, sense_len); 1375 memcpy(cp->sense_buffer, sense_data, sense_len);
1415 1376
1416 sp->request_sense_ptr += sense_len; 1377 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1417 sp->request_sense_length -= sense_len; 1378 track_sense_len -= sense_len;
1418 if (sp->request_sense_length != 0) 1379 SET_CMD_SENSE_LEN(sp, track_sense_len);
1380
1381 if (track_sense_len != 0) {
1419 rsp->status_srb = sp; 1382 rsp->status_srb = sp;
1383 cp->result = res;
1384 }
1420 1385
1421 if (sense_len) { 1386 if (sense_len) {
1422 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1387 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
@@ -1444,7 +1409,7 @@ static inline int
1444qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1409qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1445{ 1410{
1446 struct scsi_qla_host *vha = sp->fcport->vha; 1411 struct scsi_qla_host *vha = sp->fcport->vha;
1447 struct scsi_cmnd *cmd = sp->cmd; 1412 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1448 uint8_t *ap = &sts24->data[12]; 1413 uint8_t *ap = &sts24->data[12];
1449 uint8_t *ep = &sts24->data[20]; 1414 uint8_t *ep = &sts24->data[20];
1450 uint32_t e_ref_tag, a_ref_tag; 1415 uint32_t e_ref_tag, a_ref_tag;
@@ -1588,6 +1553,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1588 uint16_t que; 1553 uint16_t que;
1589 struct req_que *req; 1554 struct req_que *req;
1590 int logit = 1; 1555 int logit = 1;
1556 int res = 0;
1591 1557
1592 sts = (sts_entry_t *) pkt; 1558 sts = (sts_entry_t *) pkt;
1593 sts24 = (struct sts_entry_24xx *) pkt; 1559 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1627,7 +1593,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1627 qla2xxx_wake_dpc(vha); 1593 qla2xxx_wake_dpc(vha);
1628 return; 1594 return;
1629 } 1595 }
1630 cp = sp->cmd; 1596 cp = GET_CMD_SP(sp);
1631 if (cp == NULL) { 1597 if (cp == NULL) {
1632 ql_dbg(ql_dbg_io, vha, 0x3018, 1598 ql_dbg(ql_dbg_io, vha, 0x3018,
1633 "Command already returned (0x%x/%p).\n", 1599 "Command already returned (0x%x/%p).\n",
@@ -1680,7 +1646,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1680 "FCP I/O protocol failure (0x%x/0x%x).\n", 1646 "FCP I/O protocol failure (0x%x/0x%x).\n",
1681 rsp_info_len, rsp_info[3]); 1647 rsp_info_len, rsp_info[3]);
1682 1648
1683 cp->result = DID_BUS_BUSY << 16; 1649 res = DID_BUS_BUSY << 16;
1684 goto out; 1650 goto out;
1685 } 1651 }
1686 } 1652 }
@@ -1697,7 +1663,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1697 case CS_COMPLETE: 1663 case CS_COMPLETE:
1698 case CS_QUEUE_FULL: 1664 case CS_QUEUE_FULL:
1699 if (scsi_status == 0) { 1665 if (scsi_status == 0) {
1700 cp->result = DID_OK << 16; 1666 res = DID_OK << 16;
1701 break; 1667 break;
1702 } 1668 }
1703 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 1669 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
@@ -1712,11 +1678,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1712 "detected (0x%x of 0x%x bytes).\n", 1678 "detected (0x%x of 0x%x bytes).\n",
1713 resid, scsi_bufflen(cp)); 1679 resid, scsi_bufflen(cp));
1714 1680
1715 cp->result = DID_ERROR << 16; 1681 res = DID_ERROR << 16;
1716 break; 1682 break;
1717 } 1683 }
1718 } 1684 }
1719 cp->result = DID_OK << 16 | lscsi_status; 1685 res = DID_OK << 16 | lscsi_status;
1720 1686
1721 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1687 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1722 ql_dbg(ql_dbg_io, vha, 0x301b, 1688 ql_dbg(ql_dbg_io, vha, 0x301b,
@@ -1732,7 +1698,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1732 break; 1698 break;
1733 1699
1734 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 1700 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1735 rsp); 1701 rsp, res);
1736 break; 1702 break;
1737 1703
1738 case CS_DATA_UNDERRUN: 1704 case CS_DATA_UNDERRUN:
@@ -1746,7 +1712,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1746 "(0x%x of 0x%x bytes).\n", 1712 "(0x%x of 0x%x bytes).\n",
1747 resid, scsi_bufflen(cp)); 1713 resid, scsi_bufflen(cp));
1748 1714
1749 cp->result = DID_ERROR << 16 | lscsi_status; 1715 res = DID_ERROR << 16 | lscsi_status;
1750 goto check_scsi_status; 1716 goto check_scsi_status;
1751 } 1717 }
1752 1718
@@ -1758,7 +1724,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1758 "detected (0x%x of 0x%x bytes).\n", 1724 "detected (0x%x of 0x%x bytes).\n",
1759 resid, scsi_bufflen(cp)); 1725 resid, scsi_bufflen(cp));
1760 1726
1761 cp->result = DID_ERROR << 16; 1727 res = DID_ERROR << 16;
1762 break; 1728 break;
1763 } 1729 }
1764 } else { 1730 } else {
@@ -1766,11 +1732,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1766 "Dropped frame(s) detected (0x%x " 1732 "Dropped frame(s) detected (0x%x "
1767 "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1733 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1768 1734
1769 cp->result = DID_ERROR << 16 | lscsi_status; 1735 res = DID_ERROR << 16 | lscsi_status;
1770 goto check_scsi_status; 1736 goto check_scsi_status;
1771 } 1737 }
1772 1738
1773 cp->result = DID_OK << 16 | lscsi_status; 1739 res = DID_OK << 16 | lscsi_status;
1774 logit = 0; 1740 logit = 0;
1775 1741
1776check_scsi_status: 1742check_scsi_status:
@@ -1793,7 +1759,7 @@ check_scsi_status:
1793 break; 1759 break;
1794 1760
1795 qla2x00_handle_sense(sp, sense_data, par_sense_len, 1761 qla2x00_handle_sense(sp, sense_data, par_sense_len,
1796 sense_len, rsp); 1762 sense_len, rsp, res);
1797 } 1763 }
1798 break; 1764 break;
1799 1765
@@ -1810,7 +1776,7 @@ check_scsi_status:
1810 * while we try to recover so instruct the mid layer 1776 * while we try to recover so instruct the mid layer
1811 * to requeue until the class decides how to handle this. 1777 * to requeue until the class decides how to handle this.
1812 */ 1778 */
1813 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1779 res = DID_TRANSPORT_DISRUPTED << 16;
1814 1780
1815 if (comp_status == CS_TIMEOUT) { 1781 if (comp_status == CS_TIMEOUT) {
1816 if (IS_FWI2_CAPABLE(ha)) 1782 if (IS_FWI2_CAPABLE(ha))
@@ -1829,14 +1795,14 @@ check_scsi_status:
1829 break; 1795 break;
1830 1796
1831 case CS_ABORTED: 1797 case CS_ABORTED:
1832 cp->result = DID_RESET << 16; 1798 res = DID_RESET << 16;
1833 break; 1799 break;
1834 1800
1835 case CS_DIF_ERROR: 1801 case CS_DIF_ERROR:
1836 logit = qla2x00_handle_dif_error(sp, sts24); 1802 logit = qla2x00_handle_dif_error(sp, sts24);
1837 break; 1803 break;
1838 default: 1804 default:
1839 cp->result = DID_ERROR << 16; 1805 res = DID_ERROR << 16;
1840 break; 1806 break;
1841 } 1807 }
1842 1808
@@ -1847,7 +1813,7 @@ out:
1847 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " 1813 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
1848 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " 1814 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
1849 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 1815 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1850 comp_status, scsi_status, cp->result, vha->host_no, 1816 comp_status, scsi_status, res, vha->host_no,
1851 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 1817 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
1852 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 1818 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
1853 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], 1819 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
@@ -1856,7 +1822,7 @@ out:
1856 resid_len, fw_resid_len); 1822 resid_len, fw_resid_len);
1857 1823
1858 if (rsp->status_srb == NULL) 1824 if (rsp->status_srb == NULL)
1859 qla2x00_sp_compl(ha, sp); 1825 sp->done(ha, sp, res);
1860} 1826}
1861 1827
1862/** 1828/**
@@ -1869,84 +1835,52 @@ out:
1869static void 1835static void
1870qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 1836qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1871{ 1837{
1872 uint8_t sense_sz = 0; 1838 uint8_t sense_sz = 0;
1873 struct qla_hw_data *ha = rsp->hw; 1839 struct qla_hw_data *ha = rsp->hw;
1874 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 1840 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1875 srb_t *sp = rsp->status_srb; 1841 srb_t *sp = rsp->status_srb;
1876 struct scsi_cmnd *cp; 1842 struct scsi_cmnd *cp;
1843 uint32_t sense_len;
1844 uint8_t *sense_ptr;
1877 1845
1878 if (sp != NULL && sp->request_sense_length != 0) { 1846 if (!sp || !GET_CMD_SENSE_LEN(sp))
1879 cp = sp->cmd; 1847 return;
1880 if (cp == NULL) {
1881 ql_log(ql_log_warn, vha, 0x3025,
1882 "cmd is NULL: already returned to OS (sp=%p).\n",
1883 sp);
1884 1848
1885 rsp->status_srb = NULL; 1849 sense_len = GET_CMD_SENSE_LEN(sp);
1886 return; 1850 sense_ptr = GET_CMD_SENSE_PTR(sp);
1887 }
1888 1851
1889 if (sp->request_sense_length > sizeof(pkt->data)) { 1852 cp = GET_CMD_SP(sp);
1890 sense_sz = sizeof(pkt->data); 1853 if (cp == NULL) {
1891 } else { 1854 ql_log(ql_log_warn, vha, 0x3025,
1892 sense_sz = sp->request_sense_length; 1855 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
1893 }
1894 1856
1895 /* Move sense data. */ 1857 rsp->status_srb = NULL;
1896 if (IS_FWI2_CAPABLE(ha)) 1858 return;
1897 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1898 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1899 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1900 sp->request_sense_ptr, sense_sz);
1901
1902 sp->request_sense_ptr += sense_sz;
1903 sp->request_sense_length -= sense_sz;
1904
1905 /* Place command on done queue. */
1906 if (sp->request_sense_length == 0) {
1907 rsp->status_srb = NULL;
1908 qla2x00_sp_compl(ha, sp);
1909 }
1910 } 1859 }
1911}
1912 1860
1913static int 1861 if (sense_len > sizeof(pkt->data))
1914qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp) 1862 sense_sz = sizeof(pkt->data);
1915{ 1863 else
1916 struct qla_hw_data *ha = vha->hw; 1864 sense_sz = sense_len;
1917 struct srb_ctx *ctx;
1918 1865
1919 if (!sp->ctx) 1866 /* Move sense data. */
1920 return 1; 1867 if (IS_FWI2_CAPABLE(ha))
1868 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1869 memcpy(sense_ptr, pkt->data, sense_sz);
1870 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1871 sense_ptr, sense_sz);
1921 1872
1922 ctx = sp->ctx; 1873 sense_len -= sense_sz;
1874 sense_ptr += sense_sz;
1923 1875
1924 if (ctx->type == SRB_LOGIN_CMD || 1876 SET_CMD_SENSE_PTR(sp, sense_ptr);
1925 ctx->type == SRB_LOGOUT_CMD || 1877 SET_CMD_SENSE_LEN(sp, sense_len);
1926 ctx->type == SRB_TM_CMD) { 1878
1927 ctx->u.iocb_cmd->done(sp); 1879 /* Place command on done queue. */
1928 return 0; 1880 if (sense_len == 0) {
1929 } else if (ctx->type == SRB_ADISC_CMD) { 1881 rsp->status_srb = NULL;
1930 ctx->u.iocb_cmd->free(sp); 1882 sp->done(ha, sp, cp->result);
1931 return 0;
1932 } else {
1933 struct fc_bsg_job *bsg_job;
1934
1935 bsg_job = ctx->u.bsg_job;
1936 if (ctx->type == SRB_ELS_CMD_HST ||
1937 ctx->type == SRB_CT_CMD)
1938 kfree(sp->fcport);
1939
1940 bsg_job->reply->reply_data.ctels_reply.status =
1941 FC_CTELS_STATUS_OK;
1942 bsg_job->reply->result = DID_ERROR << 16;
1943 bsg_job->reply->reply_payload_rcv_len = 0;
1944 kfree(sp->ctx);
1945 mempool_free(sp, ha->srb_mempool);
1946 bsg_job->job_done(bsg_job);
1947 return 0;
1948 } 1883 }
1949 return 1;
1950} 1884}
1951 1885
1952/** 1886/**
@@ -1962,43 +1896,18 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1962 const char func[] = "ERROR-IOCB"; 1896 const char func[] = "ERROR-IOCB";
1963 uint16_t que = MSW(pkt->handle); 1897 uint16_t que = MSW(pkt->handle);
1964 struct req_que *req = ha->req_q_map[que]; 1898 struct req_que *req = ha->req_q_map[que];
1899 int res = DID_ERROR << 16;
1965 1900
1966 if (pkt->entry_status & RF_INV_E_ORDER) 1901 ql_dbg(ql_dbg_async, vha, 0x502a,
1967 ql_dbg(ql_dbg_async, vha, 0x502a, 1902 "type of error status in response: 0x%x\n", pkt->entry_status);
1968 "Invalid Entry Order.\n"); 1903
1969 else if (pkt->entry_status & RF_INV_E_COUNT) 1904 if (pkt->entry_status & RF_BUSY)
1970 ql_dbg(ql_dbg_async, vha, 0x502b, 1905 res = DID_BUS_BUSY << 16;
1971 "Invalid Entry Count.\n");
1972 else if (pkt->entry_status & RF_INV_E_PARAM)
1973 ql_dbg(ql_dbg_async, vha, 0x502c,
1974 "Invalid Entry Parameter.\n");
1975 else if (pkt->entry_status & RF_INV_E_TYPE)
1976 ql_dbg(ql_dbg_async, vha, 0x502d,
1977 "Invalid Entry Type.\n");
1978 else if (pkt->entry_status & RF_BUSY)
1979 ql_dbg(ql_dbg_async, vha, 0x502e,
1980 "Busy.\n");
1981 else
1982 ql_dbg(ql_dbg_async, vha, 0x502f,
1983 "UNKNOWN flag error.\n");
1984 1906
1985 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1907 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1986 if (sp) { 1908 if (sp)
1987 if (qla2x00_free_sp_ctx(vha, sp)) { 1909 sp->done(ha, sp, res);
1988 if (pkt->entry_status & 1910 else {
1989 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1990 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1991 sp->cmd->result = DID_ERROR << 16;
1992 } else if (pkt->entry_status & RF_BUSY) {
1993 sp->cmd->result = DID_BUS_BUSY << 16;
1994 } else {
1995 sp->cmd->result = DID_ERROR << 16;
1996 }
1997 qla2x00_sp_compl(ha, sp);
1998 }
1999 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
2000 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
2001 || pkt->entry_type == COMMAND_TYPE_6) {
2002 ql_log(ql_log_warn, vha, 0x5030, 1911 ql_log(ql_log_warn, vha, 0x5030,
2003 "Error entry - invalid handle.\n"); 1912 "Error entry - invalid handle.\n");
2004 1913
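The qla_isr.c hunks above all have the same shape: the per-type unmap/kfree/mempool_free/job_done sequences disappear from the interrupt path, and every handler now ends in a single sp->done(vha, sp, res) (or sp->done(ha, sp, res) for SCSI status entries). A minimal model of that flow is sketched below; the handler name is hypothetical, and the real handlers such as qla2x00_ct_entry and qla24xx_els_ct_entry add the status decoding that is elided here.

/*
 * Minimal model of a response-queue handler after this patch.  The
 * handler only derives a result code and fills reply fields; all
 * type-specific cleanup lives in the sp->done callback attached when
 * the SRB was queued (qla2x00_sp_compl for SCSI commands, the BSG and
 * iocb completions for pass-through and fabric login work).
 */
static void
qlaxx_example_entry(scsi_qla_host_t *vha, struct req_que *req, void *pkt)
{
        const char func[] = "EXAMPLE-IOCB";
        srb_t *sp;
        int res = DID_OK << 16;

        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
        if (!sp)
                return;

        /* ... inspect comp_status, set res / reply_payload_rcv_len ... */

        sp->done(vha, sp, res);         /* unmap, free and complete per sp->type */
}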
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index be520a9d0b71..50ec272b61db 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -874,6 +874,7 @@ qla2x00_abort_command(srb_t *sp)
874 scsi_qla_host_t *vha = fcport->vha; 874 scsi_qla_host_t *vha = fcport->vha;
875 struct qla_hw_data *ha = vha->hw; 875 struct qla_hw_data *ha = vha->hw;
876 struct req_que *req = vha->req; 876 struct req_que *req = vha->req;
877 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
877 878
878 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); 879 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
879 880
@@ -896,7 +897,7 @@ qla2x00_abort_command(srb_t *sp)
896 mcp->mb[1] = fcport->loop_id << 8; 897 mcp->mb[1] = fcport->loop_id << 8;
897 mcp->mb[2] = (uint16_t)handle; 898 mcp->mb[2] = (uint16_t)handle;
898 mcp->mb[3] = (uint16_t)(handle >> 16); 899 mcp->mb[3] = (uint16_t)(handle >> 16);
899 mcp->mb[6] = (uint16_t)sp->cmd->device->lun; 900 mcp->mb[6] = (uint16_t)cmd->device->lun;
900 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 901 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
901 mcp->in_mb = MBX_0; 902 mcp->in_mb = MBX_0;
902 mcp->tov = MBX_TOV_SECONDS; 903 mcp->tov = MBX_TOV_SECONDS;
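The qla_mbx.c change is representative of the mechanical part of this series: every direct sp->cmd dereference becomes GET_CMD_SP(sp). Judging from the sp->u.scmd assignments elsewhere in the diff, these accessors are thin wrappers over the new union; plausible expansions are shown below as an assumption, since the actual definitions (in qla_def.h) are not part of this excerpt and may use different member names.

/*
 * Plausible expansions of the accessors used throughout this diff,
 * reconstructed from the sp->u.scmd usage; not copied from qla_def.h.
 */
#define GET_CMD_SP(sp)             ((sp)->u.scmd.cmd)
#define GET_CMD_CTX_SP(sp)         ((sp)->u.scmd.ctx)
#define GET_CMD_SENSE_LEN(sp)      ((sp)->u.scmd.request_sense_length)
#define SET_CMD_SENSE_LEN(sp, len) ((sp)->u.scmd.request_sense_length = (len))
#define GET_CMD_SENSE_PTR(sp)      ((sp)->u.scmd.request_sense_ptr)
#define SET_CMD_SENSE_PTR(sp, ptr) ((sp)->u.scmd.request_sense_ptr = (ptr))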
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 34fb91904fd9..0a2f2d578803 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3608,7 +3608,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 3608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3609 sp = req->outstanding_cmds[cnt]; 3609 sp = req->outstanding_cmds[cnt];
3610 if (sp) { 3610 if (sp) {
3611 if (!sp->ctx || 3611 if (!sp->u.scmd.ctx ||
3612 (sp->flags & SRB_FCP_CMND_DMA_VALID)) { 3612 (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
3613 spin_unlock_irqrestore( 3613 spin_unlock_irqrestore(
3614 &ha->hardware_lock, flags); 3614 &ha->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 79c752eef991..a8de6a3de499 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -304,7 +304,6 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
304 struct req_que **, struct rsp_que **); 304 struct req_que **, struct rsp_que **);
305static void qla2x00_free_fw_dump(struct qla_hw_data *); 305static void qla2x00_free_fw_dump(struct qla_hw_data *);
306static void qla2x00_mem_free(struct qla_hw_data *); 306static void qla2x00_mem_free(struct qla_hw_data *);
307static void qla2x00_sp_free_dma(srb_t *);
308 307
309/* -------------------------------------------------------------------------- */ 308/* -------------------------------------------------------------------------- */
310static int qla2x00_alloc_queues(struct qla_hw_data *ha) 309static int qla2x00_alloc_queues(struct qla_hw_data *ha)
@@ -559,28 +558,75 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
559 return str; 558 return str;
560} 559}
561 560
562static inline srb_t * 561void
563qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, 562qla2x00_sp_free_dma(void *vha, void *ptr)
564 struct scsi_cmnd *cmd)
565{ 563{
566 srb_t *sp; 564 srb_t *sp = (srb_t *)ptr;
567 struct qla_hw_data *ha = vha->hw; 565 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
566 struct qla_hw_data *ha = sp->fcport->vha->hw;
567 void *ctx = GET_CMD_CTX_SP(sp);
568 568
569 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 569 if (sp->flags & SRB_DMA_VALID) {
570 if (!sp) { 570 scsi_dma_unmap(cmd);
571 ql_log(ql_log_warn, vha, 0x3006, 571 sp->flags &= ~SRB_DMA_VALID;
572 "Memory allocation failed for sp.\n");
573 return sp;
574 } 572 }
575 573
576 atomic_set(&sp->ref_count, 1); 574 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
577 sp->fcport = fcport; 575 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
578 sp->cmd = cmd; 576 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
579 sp->flags = 0; 577 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
580 CMD_SP(cmd) = (void *)sp; 578 }
581 sp->ctx = NULL; 579
580 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
581 /* List assured to be having elements */
582 qla2x00_clean_dsd_pool(ha, sp);
583 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
584 }
585
586 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
587 dma_pool_free(ha->dl_dma_pool, ctx,
588 ((struct crc_context *)ctx)->crc_ctx_dma);
589 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
590 }
591
592 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
593 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
594
595 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
596 ctx1->fcp_cmnd_dma);
597 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
598 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
599 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
600 mempool_free(ctx1, ha->ctx_mempool);
601 ctx1 = NULL;
602 }
603
604 CMD_SP(cmd) = NULL;
605 mempool_free(sp, ha->srb_mempool);
606}
607
608static void
609qla2x00_sp_compl(void *data, void *ptr, int res)
610{
611 struct qla_hw_data *ha = (struct qla_hw_data *)data;
612 srb_t *sp = (srb_t *)ptr;
613 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
614
615 cmd->result = res;
616
617 if (atomic_read(&sp->ref_count) == 0) {
618 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
619 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
620 sp, GET_CMD_SP(sp));
621 if (ql2xextended_error_logging & ql_dbg_io)
622 BUG();
623 return;
624 }
625 if (!atomic_dec_and_test(&sp->ref_count))
626 return;
582 627
583 return sp; 628 qla2x00_sp_free_dma(ha, sp);
629 cmd->scsi_done(cmd);
584} 630}
585 631
586static int 632static int
@@ -644,10 +690,17 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
644 goto qc24_target_busy; 690 goto qc24_target_busy;
645 } 691 }
646 692
647 sp = qla2x00_get_new_sp(base_vha, fcport, cmd); 693 sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
648 if (!sp) 694 if (!sp)
649 goto qc24_host_busy; 695 goto qc24_host_busy;
650 696
697 sp->u.scmd.cmd = cmd;
698 sp->type = SRB_SCSI_CMD;
699 atomic_set(&sp->ref_count, 1);
700 CMD_SP(cmd) = (void *)sp;
701 sp->free = qla2x00_sp_free_dma;
702 sp->done = qla2x00_sp_compl;
703
651 rval = ha->isp_ops->start_scsi(sp); 704 rval = ha->isp_ops->start_scsi(sp);
652 if (rval != QLA_SUCCESS) { 705 if (rval != QLA_SUCCESS) {
653 ql_dbg(ql_dbg_io, vha, 0x3013, 706 ql_dbg(ql_dbg_io, vha, 0x3013,
@@ -658,8 +711,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
658 return 0; 711 return 0;
659 712
660qc24_host_busy_free_sp: 713qc24_host_busy_free_sp:
661 qla2x00_sp_free_dma(sp); 714 qla2x00_sp_free_dma(ha, sp);
662 mempool_free(sp, ha->srb_mempool);
663 715
664qc24_host_busy: 716qc24_host_busy:
665 return SCSI_MLQUEUE_HOST_BUSY; 717 return SCSI_MLQUEUE_HOST_BUSY;
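Taken together with qla2x00_sp_free_dma and qla2x00_sp_compl above, the queuecommand hunk fixes the whole I/O lifecycle into one pattern. The condensed view below is a non-compilable summary of the statements in that hunk, and it assumes qla2x00_get_sp is the new common allocator (its body, presumably in qla_inline.h, is not part of this excerpt).

/* Condensed submit/complete path for a SCSI command after this patch. */
sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);  /* srb_mempool alloc + base init */
sp->type = SRB_SCSI_CMD;
sp->u.scmd.cmd = cmd;                    /* what GET_CMD_SP() reads back */
atomic_set(&sp->ref_count, 1);
CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;          /* DMA/ctx teardown only */
sp->done = qla2x00_sp_compl;             /* drop ref, free DMA, scsi_done() */

rval = ha->isp_ops->start_scsi(sp);      /* build and queue the IOCB(s) */

/* ...later, from the response path or error/abort handling: */
sp->done(ha, sp, res);                   /* single exit path for every status */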
@@ -893,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
893 } 945 }
894 946
895 spin_lock_irqsave(&ha->hardware_lock, flags); 947 spin_lock_irqsave(&ha->hardware_lock, flags);
896 qla2x00_sp_compl(ha, sp); 948 sp->done(ha, sp, 0);
897 spin_unlock_irqrestore(&ha->hardware_lock, flags); 949 spin_unlock_irqrestore(&ha->hardware_lock, flags);
898 950
899 /* Did the command return during mailbox execution? */ 951 /* Did the command return during mailbox execution? */
@@ -925,6 +977,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
925 struct qla_hw_data *ha = vha->hw; 977 struct qla_hw_data *ha = vha->hw;
926 struct req_que *req; 978 struct req_que *req;
927 srb_t *sp; 979 srb_t *sp;
980 struct scsi_cmnd *cmd;
928 981
929 status = QLA_SUCCESS; 982 status = QLA_SUCCESS;
930 983
@@ -935,28 +988,29 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
935 sp = req->outstanding_cmds[cnt]; 988 sp = req->outstanding_cmds[cnt];
936 if (!sp) 989 if (!sp)
937 continue; 990 continue;
938 if ((sp->ctx) && !IS_PROT_IO(sp)) 991 if (sp->type != SRB_SCSI_CMD)
939 continue; 992 continue;
940 if (vha->vp_idx != sp->fcport->vha->vp_idx) 993 if (vha->vp_idx != sp->fcport->vha->vp_idx)
941 continue; 994 continue;
942 match = 0; 995 match = 0;
996 cmd = GET_CMD_SP(sp);
943 switch (type) { 997 switch (type) {
944 case WAIT_HOST: 998 case WAIT_HOST:
945 match = 1; 999 match = 1;
946 break; 1000 break;
947 case WAIT_TARGET: 1001 case WAIT_TARGET:
948 match = sp->cmd->device->id == t; 1002 match = cmd->device->id == t;
949 break; 1003 break;
950 case WAIT_LUN: 1004 case WAIT_LUN:
951 match = (sp->cmd->device->id == t && 1005 match = (cmd->device->id == t &&
952 sp->cmd->device->lun == l); 1006 cmd->device->lun == l);
953 break; 1007 break;
954 } 1008 }
955 if (!match) 1009 if (!match)
956 continue; 1010 continue;
957 1011
958 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1012 spin_unlock_irqrestore(&ha->hardware_lock, flags);
959 status = qla2x00_eh_wait_on_command(sp->cmd); 1013 status = qla2x00_eh_wait_on_command(cmd);
960 spin_lock_irqsave(&ha->hardware_lock, flags); 1014 spin_lock_irqsave(&ha->hardware_lock, flags);
961 } 1015 }
962 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1016 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1249,7 +1303,6 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1249 int que, cnt; 1303 int que, cnt;
1250 unsigned long flags; 1304 unsigned long flags;
1251 srb_t *sp; 1305 srb_t *sp;
1252 struct srb_ctx *ctx;
1253 struct qla_hw_data *ha = vha->hw; 1306 struct qla_hw_data *ha = vha->hw;
1254 struct req_que *req; 1307 struct req_que *req;
1255 1308
@@ -1262,31 +1315,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1262 sp = req->outstanding_cmds[cnt]; 1315 sp = req->outstanding_cmds[cnt];
1263 if (sp) { 1316 if (sp) {
1264 req->outstanding_cmds[cnt] = NULL; 1317 req->outstanding_cmds[cnt] = NULL;
1265 if (!sp->ctx || 1318 sp->done(vha, sp, res);
1266 (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
1267 IS_PROT_IO(sp)) {
1268 sp->cmd->result = res;
1269 qla2x00_sp_compl(ha, sp);
1270 } else {
1271 ctx = sp->ctx;
1272 if (ctx->type == SRB_ELS_CMD_RPT ||
1273 ctx->type == SRB_ELS_CMD_HST ||
1274 ctx->type == SRB_CT_CMD) {
1275 struct fc_bsg_job *bsg_job =
1276 ctx->u.bsg_job;
1277 if (bsg_job->request->msgcode
1278 == FC_BSG_HST_CT)
1279 kfree(sp->fcport);
1280 bsg_job->req->errors = 0;
1281 bsg_job->reply->result = res;
1282 bsg_job->job_done(bsg_job);
1283 kfree(sp->ctx);
1284 mempool_free(sp,
1285 ha->srb_mempool);
1286 } else {
1287 ctx->u.iocb_cmd->free(sp);
1288 }
1289 }
1290 } 1319 }
1291 } 1320 }
1292 } 1321 }
@@ -3820,75 +3849,6 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
3820 } 3849 }
3821} 3850}
3822 3851
3823static void
3824qla2x00_sp_free_dma(srb_t *sp)
3825{
3826 struct scsi_cmnd *cmd = sp->cmd;
3827 struct qla_hw_data *ha = sp->fcport->vha->hw;
3828
3829 if (sp->flags & SRB_DMA_VALID) {
3830 scsi_dma_unmap(cmd);
3831 sp->flags &= ~SRB_DMA_VALID;
3832 }
3833
3834 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
3835 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
3836 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
3837 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
3838 }
3839
3840 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
3841 /* List assured to be having elements */
3842 qla2x00_clean_dsd_pool(ha, sp);
3843 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
3844 }
3845
3846 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
3847 dma_pool_free(ha->dl_dma_pool, sp->ctx,
3848 ((struct crc_context *)sp->ctx)->crc_ctx_dma);
3849 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
3850 }
3851
3852 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
3853 struct ct6_dsd *ctx = sp->ctx;
3854 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
3855 ctx->fcp_cmnd_dma);
3856 list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
3857 ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
3858 ha->gbl_dsd_avail += ctx->dsd_use_cnt;
3859 mempool_free(sp->ctx, ha->ctx_mempool);
3860 sp->ctx = NULL;
3861 }
3862
3863 CMD_SP(cmd) = NULL;
3864}
3865
3866static void
3867qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
3868{
3869 struct scsi_cmnd *cmd = sp->cmd;
3870
3871 qla2x00_sp_free_dma(sp);
3872 mempool_free(sp, ha->srb_mempool);
3873 cmd->scsi_done(cmd);
3874}
3875
3876void
3877qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3878{
3879 if (atomic_read(&sp->ref_count) == 0) {
3880 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
3881 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
3882 sp, sp->cmd);
3883 if (ql2xextended_error_logging & ql_dbg_io)
3884 BUG();
3885 return;
3886 }
3887 if (!atomic_dec_and_test(&sp->ref_count))
3888 return;
3889 qla2x00_sp_final_compl(ha, sp);
3890}
3891
3892/************************************************************************** 3852/**************************************************************************
3893* qla2x00_timer 3853* qla2x00_timer
3894* 3854*
@@ -3959,7 +3919,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3959 sp = req->outstanding_cmds[index]; 3919 sp = req->outstanding_cmds[index];
3960 if (!sp) 3920 if (!sp)
3961 continue; 3921 continue;
3962 if (sp->ctx && !IS_PROT_IO(sp)) 3922 if (sp->type != SRB_SCSI_CMD)
3963 continue; 3923 continue;
3964 sfcp = sp->fcport; 3924 sfcp = sp->fcport;
3965 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 3925 if (!(sfcp->flags & FCF_FCP2_DEVICE))