author    Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 16:16:38 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 16:16:38 -0400
commit    5aa1c98862d3f365d9cf6d0833d5dc127d2a76e7 (patch)
tree      89cbf0b67634ecc43a863a6ca058ff749df3cce7 /drivers/scsi/lpfc
parent    6da6dc2380c3cfe8d6b59d7c3c55fdd7a521fe6c (diff)
parent    9e45dd73234af9a59613dc2989dcc2df2dab847f (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James "Jej B" Bottomley:
 "The patch set is mostly driver updates (qla4, qla2 [ISF support
  updates], lpfc, aacraid [dual firmware image support]) and a few
  bug fixes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
  [SCSI] iscsi_tcp: support PF_MEMALLOC/__GFP_MEMALLOC
  [SCSI] libiscsi: avoid unnecessary multiple NULL assignments
  [SCSI] qla4xxx: Update driver version to 5.03.00-k8
  [SCSI] qla4xxx: Added print statements to display AENs
  [SCSI] qla4xxx: Use correct value for max flash node entries
  [SCSI] qla4xxx: Restrict logout from boot target session using session id
  [SCSI] qla4xxx: Use correct flash ddb offset for ISP40XX
  [SCSI] isci: add CONFIG_PM_SLEEP to suspend/resume functions
  [SCSI] scsi_dh_alua: Add module parameter to allow failover to non preferred path without STPG
  [SCSI] qla2xxx: Update the driver version to 8.05.00.03-k.
  [SCSI] qla2xxx: Obtain loopback iteration count from bsg request.
  [SCSI] qla2xxx: Add clarifying printk to thermal access fail cases.
  [SCSI] qla2xxx: Remove duplicated include form qla_isr.c
  [SCSI] qla2xxx: Enhancements to support ISPFx00.
  [SCSI] qla4xxx: Update driver version to 5.03.00-k7
  [SCSI] qla4xxx: Replace dev type macros with generic portal type macros
  [SCSI] scsi_transport_iscsi: Declare portal type string macros for generic use
  [SCSI] qla4xxx: Add flash node mgmt support
  [SCSI] libiscsi: export function iscsi_switch_str_param
  [SCSI] scsi_transport_iscsi: Add flash node mgmt support
  ...
Diffstat (limited to 'drivers/scsi/lpfc')
 drivers/scsi/lpfc/lpfc_attr.c      |   12
 drivers/scsi/lpfc/lpfc_bsg.c       | 1107
 drivers/scsi/lpfc/lpfc_crtn.h      |    1
 drivers/scsi/lpfc/lpfc_els.c       |   21
 drivers/scsi/lpfc/lpfc_hbadisc.c   |   68
 drivers/scsi/lpfc/lpfc_hw4.h       |    3
 drivers/scsi/lpfc/lpfc_init.c      |  106
 drivers/scsi/lpfc/lpfc_mbox.c      |   24
 drivers/scsi/lpfc/lpfc_nportdisc.c |    1
 drivers/scsi/lpfc/lpfc_scsi.c      |    8
 drivers/scsi/lpfc/lpfc_sli.c       |  115
 drivers/scsi/lpfc/lpfc_version.h   |    2
 12 files changed, 837 insertions, 631 deletions
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a364cae9e984..9290713af253 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -692,7 +692,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
  */
     for (i = 0; i < psli->num_rings; i++) {
         pring = &psli->ring[i];
-        while (pring->txcmplq_cnt) {
+        while (!list_empty(&pring->txcmplq)) {
             msleep(10);
             if (cnt++ > 500) {  /* 5 secs */
                 lpfc_printf_log(phba,
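
Note: the hunk above drops the driver-maintained txcmplq_cnt counter in
favor of testing the list itself. A minimal sketch of the resulting drain
loop, using only names visible in the hunk:

    /* Poll until the txcmplq list drains, for at most ~5 seconds.
     * list_empty() inspects the list head directly, so there is no
     * separate counter that can drift out of sync with the queue.
     */
    int cnt = 0;
    while (!list_empty(&pring->txcmplq)) {
        msleep(10);             /* 10 ms per iteration */
        if (cnt++ > 500)        /* 500 * 10 ms = 5 secs */
            break;
    }
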
@@ -2302,11 +2302,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
     "FCF Fast failover=1 Priority failover=2");
 
-int lpfc_enable_rrq;
+int lpfc_enable_rrq = 2;
 module_param(lpfc_enable_rrq, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
 lpfc_param_show(enable_rrq);
-lpfc_param_init(enable_rrq, 0, 0, 1);
+/*
+# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
+# 0x0 = disabled, XRI/OXID use not tracked.
+# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
+# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
+*/
+lpfc_param_init(enable_rrq, 2, 0, 2);
 static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
 
 /*
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d106456f1d..888666892004 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -64,18 +64,14 @@ struct lpfc_bsg_event {
     struct list_head events_to_get;
     struct list_head events_to_see;
 
-    /* job waiting for this event to finish */
-    struct fc_bsg_job *set_job;
+    /* driver data associated with the job */
+    void *dd_data;
 };
 
 struct lpfc_bsg_iocb {
     struct lpfc_iocbq *cmdiocbq;
-    struct lpfc_iocbq *rspiocbq;
-    struct lpfc_dmabuf *bmp;
+    struct lpfc_dmabuf *rmp;
     struct lpfc_nodelist *ndlp;
-
-    /* job waiting for this iocb to finish */
-    struct fc_bsg_job *set_job;
 };
 
 struct lpfc_bsg_mbox {
@@ -86,20 +82,13 @@ struct lpfc_bsg_mbox {
     uint32_t mbOffset; /* from app */
     uint32_t inExtWLen; /* from app */
     uint32_t outExtWLen; /* from app */
-
-    /* job waiting for this mbox command to finish */
-    struct fc_bsg_job *set_job;
 };
 
 #define MENLO_DID 0x0000FC0E
 
 struct lpfc_bsg_menlo {
     struct lpfc_iocbq *cmdiocbq;
-    struct lpfc_iocbq *rspiocbq;
-    struct lpfc_dmabuf *bmp;
-
-    /* job waiting for this iocb to finish */
-    struct fc_bsg_job *set_job;
+    struct lpfc_dmabuf *rmp;
 };
 
 #define TYPE_EVT 1
@@ -108,6 +97,7 @@ struct lpfc_bsg_menlo {
 #define TYPE_MENLO 4
 struct bsg_job_data {
     uint32_t type;
+    struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
     union {
         struct lpfc_bsg_event *evt;
         struct lpfc_bsg_iocb iocb;
@@ -141,6 +131,138 @@ struct lpfc_dmabufext {
     uint32_t flag;
 };
 
+static void
+lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+    struct lpfc_dmabuf *mlast, *next_mlast;
+
+    if (mlist) {
+        list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
+                     list) {
+            lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+            list_del(&mlast->list);
+            kfree(mlast);
+        }
+        lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+        kfree(mlist);
+    }
+    return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
+               int outbound_buffers, struct ulp_bde64 *bpl,
+               int *bpl_entries)
+{
+    struct lpfc_dmabuf *mlist = NULL;
+    struct lpfc_dmabuf *mp;
+    unsigned int bytes_left = size;
+
+    /* Verify we can support the size specified */
+    if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
+        return NULL;
+
+    /* Determine the number of dma buffers to allocate */
+    *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
+            size/LPFC_BPL_SIZE);
+
+    /* Allocate dma buffer and place in BPL passed */
+    while (bytes_left) {
+        /* Allocate dma buffer  */
+        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+        if (!mp) {
+            if (mlist)
+                lpfc_free_bsg_buffers(phba, mlist);
+            return NULL;
+        }
+
+        INIT_LIST_HEAD(&mp->list);
+        mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+
+        if (!mp->virt) {
+            kfree(mp);
+            if (mlist)
+                lpfc_free_bsg_buffers(phba, mlist);
+            return NULL;
+        }
+
+        /* Queue it to a linked list */
+        if (!mlist)
+            mlist = mp;
+        else
+            list_add_tail(&mp->list, &mlist->list);
+
+        /* Add buffer to buffer pointer list */
+        if (outbound_buffers)
+            bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+        else
+            bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+        bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+        bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+        bpl->tus.f.bdeSize = (uint16_t)
+            (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
+             bytes_left);
+        bytes_left -= bpl->tus.f.bdeSize;
+        bpl->tus.w = le32_to_cpu(bpl->tus.w);
+        bpl++;
+    }
+    return mlist;
+}
+
+static unsigned int
+lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
+           struct fc_bsg_buffer *bsg_buffers,
+           unsigned int bytes_to_transfer, int to_buffers)
+{
+
+    struct lpfc_dmabuf *mp;
+    unsigned int transfer_bytes, bytes_copied = 0;
+    unsigned int sg_offset, dma_offset;
+    unsigned char *dma_address, *sg_address;
+    struct scatterlist *sgel;
+    LIST_HEAD(temp_list);
+
+
+    list_splice_init(&dma_buffers->list, &temp_list);
+    list_add(&dma_buffers->list, &temp_list);
+    sg_offset = 0;
+    sgel = bsg_buffers->sg_list;
+    list_for_each_entry(mp, &temp_list, list) {
+        dma_offset = 0;
+        while (bytes_to_transfer && sgel &&
+               (dma_offset < LPFC_BPL_SIZE)) {
+            dma_address = mp->virt + dma_offset;
+            if (sg_offset) {
+                /* Continue previous partial transfer of sg */
+                sg_address = sg_virt(sgel) + sg_offset;
+                transfer_bytes = sgel->length - sg_offset;
+            } else {
+                sg_address = sg_virt(sgel);
+                transfer_bytes = sgel->length;
+            }
+            if (bytes_to_transfer < transfer_bytes)
+                transfer_bytes = bytes_to_transfer;
+            if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
+                transfer_bytes = LPFC_BPL_SIZE - dma_offset;
+            if (to_buffers)
+                memcpy(dma_address, sg_address, transfer_bytes);
+            else
+                memcpy(sg_address, dma_address, transfer_bytes);
+            dma_offset += transfer_bytes;
+            sg_offset += transfer_bytes;
+            bytes_to_transfer -= transfer_bytes;
+            bytes_copied += transfer_bytes;
+            if (sg_offset >= sgel->length) {
+                sg_offset = 0;
+                sgel = sg_next(sgel);
+            }
+        }
+    }
+    list_del_init(&dma_buffers->list);
+    list_splice(&temp_list, &dma_buffers->list);
+    return bytes_copied;
+}
+
 /**
  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
  * @phba: Pointer to HBA context object.
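
Note: the two helpers added above are a matched pair; a hedged sketch of
the intended lifecycle, with payload_len standing in for a caller-supplied
value:

    /* Sketch only: carve a payload into LPFC_BPL_SIZE dma buffers,
     * recording each one in the caller's buffer pointer list (BPL).
     * bpl_entries is in/out: capacity in, entries consumed out.
     */
    int bpl_entries = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
    struct lpfc_dmabuf *buffers;

    buffers = lpfc_alloc_bsg_buffers(phba, payload_len,
                     1 /* outbound */, bpl, &bpl_entries);
    if (!buffers)
        return -ENOMEM;
    /* ... stage data with lpfc_bsg_copy_data() and issue the iocb ... */
    lpfc_free_bsg_buffers(phba, buffers);  /* frees every chained buffer */
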
@@ -166,62 +288,72 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
     struct bsg_job_data *dd_data;
     struct fc_bsg_job *job;
     IOCB_t *rsp;
-    struct lpfc_dmabuf *bmp;
+    struct lpfc_dmabuf *bmp, *cmp, *rmp;
     struct lpfc_nodelist *ndlp;
     struct lpfc_bsg_iocb *iocb;
     unsigned long flags;
+    unsigned int rsp_size;
     int rc = 0;
 
+    dd_data = cmdiocbq->context1;
+
+    /* Determine if job has been aborted */
     spin_lock_irqsave(&phba->ct_ev_lock, flags);
-    dd_data = cmdiocbq->context2;
-    if (!dd_data) {
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-        lpfc_sli_release_iocbq(phba, cmdiocbq);
-        return;
+    job = dd_data->set_job;
+    if (job) {
+        /* Prevent timeout handling from trying to abort job */
+        job->dd_data = NULL;
     }
+    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
     iocb = &dd_data->context_un.iocb;
-    job = iocb->set_job;
-    job->dd_data = NULL; /* so timeout handler does not reply */
-
-    bmp = iocb->bmp;
+    ndlp = iocb->ndlp;
+    rmp = iocb->rmp;
+    cmp = cmdiocbq->context2;
+    bmp = cmdiocbq->context3;
     rsp = &rspiocbq->iocb;
-    ndlp = cmdiocbq->context1;
 
-    pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-             job->request_payload.sg_cnt, DMA_TO_DEVICE);
-    pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-             job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+    /* Copy the completed data or set the error status */
 
-    if (rsp->ulpStatus) {
-        if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-            switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-            case IOERR_SEQUENCE_TIMEOUT:
-                rc = -ETIMEDOUT;
-                break;
-            case IOERR_INVALID_RPI:
-                rc = -EFAULT;
-                break;
-            default:
-                rc = -EACCES;
-                break;
-            }
-        } else
-            rc = -EACCES;
-    } else
-        job->reply->reply_payload_rcv_len =
-            rsp->un.genreq64.bdl.bdeSize;
+    if (job) {
+        if (rsp->ulpStatus) {
+            if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+                switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+                case IOERR_SEQUENCE_TIMEOUT:
+                    rc = -ETIMEDOUT;
+                    break;
+                case IOERR_INVALID_RPI:
+                    rc = -EFAULT;
+                    break;
+                default:
+                    rc = -EACCES;
+                    break;
+                }
+            } else {
+                rc = -EACCES;
+            }
+        } else {
+            rsp_size = rsp->un.genreq64.bdl.bdeSize;
+            job->reply->reply_payload_rcv_len =
+                lpfc_bsg_copy_data(rmp, &job->reply_payload,
+                           rsp_size, 0);
+        }
+    }
 
+    lpfc_free_bsg_buffers(phba, cmp);
+    lpfc_free_bsg_buffers(phba, rmp);
     lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+    kfree(bmp);
     lpfc_sli_release_iocbq(phba, cmdiocbq);
     lpfc_nlp_put(ndlp);
-    kfree(bmp);
     kfree(dd_data);
-    /* make error code available to userspace */
-    job->reply->result = rc;
-    /* complete the job back to userspace */
-    job->job_done(job);
-    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+    /* Complete the job if the job is still active */
+
+    if (job) {
+        job->reply->result = rc;
+        job->job_done(job);
+    }
     return;
 }
 
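
Note: the rewritten completion handler above is one instance of a pattern
this patch repeats in every completion path; in outline (a sketch of the
pattern, not additional driver code):

    spin_lock_irqsave(&phba->ct_ev_lock, flags);
    job = dd_data->set_job;        /* NULL if the job was aborted */
    if (job)
        job->dd_data = NULL;       /* timeout handler sees no context */
    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

    /* ... release driver-owned resources unconditionally ... */

    if (job) {                     /* only one path completes the job */
        job->reply->result = rc;
        job->job_done(job);
    }

The lock makes "read set_job / clear job->dd_data" atomic with respect to
the BSG timeout handler, so completion and timeout can no longer both
finish the same job.
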
@@ -240,12 +372,9 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
     uint32_t timeout;
     struct lpfc_iocbq *cmdiocbq = NULL;
     IOCB_t *cmd;
-    struct lpfc_dmabuf *bmp = NULL;
+    struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
     int request_nseg;
     int reply_nseg;
-    struct scatterlist *sgel = NULL;
-    int numbde;
-    dma_addr_t busaddr;
     struct bsg_job_data *dd_data;
     uint32_t creg_val;
     int rc = 0;
@@ -268,54 +397,50 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
         goto no_ndlp;
     }
 
-    bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-    if (!bmp) {
-        rc = -ENOMEM;
-        goto free_ndlp;
-    }
-
     if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
         rc = -ENODEV;
-        goto free_bmp;
+        goto free_ndlp;
     }
 
     cmdiocbq = lpfc_sli_get_iocbq(phba);
     if (!cmdiocbq) {
         rc = -ENOMEM;
-        goto free_bmp;
+        goto free_ndlp;
     }
 
     cmd = &cmdiocbq->iocb;
+
+    bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+    if (!bmp) {
+        rc = -ENOMEM;
+        goto free_cmdiocbq;
+    }
     bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
     if (!bmp->virt) {
         rc = -ENOMEM;
-        goto free_cmdiocbq;
+        goto free_bmp;
     }
 
     INIT_LIST_HEAD(&bmp->list);
+
     bpl = (struct ulp_bde64 *) bmp->virt;
-    request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-                  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-    for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-        busaddr = sg_dma_address(sgel);
-        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-        bpl->tus.f.bdeSize = sg_dma_len(sgel);
-        bpl->tus.w = cpu_to_le32(bpl->tus.w);
-        bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-        bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-        bpl++;
+    request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+    cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+                     1, bpl, &request_nseg);
+    if (!cmp) {
+        rc = -ENOMEM;
+        goto free_bmp;
     }
+    lpfc_bsg_copy_data(cmp, &job->request_payload,
+               job->request_payload.payload_len, 1);
 
-    reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-                job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-    for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-        busaddr = sg_dma_address(sgel);
-        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-        bpl->tus.f.bdeSize = sg_dma_len(sgel);
-        bpl->tus.w = cpu_to_le32(bpl->tus.w);
-        bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-        bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-        bpl++;
+    bpl += request_nseg;
+    reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+    rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+                     bpl, &reply_nseg);
+    if (!rmp) {
+        rc = -ENOMEM;
+        goto free_cmp;
     }
 
     cmd->un.genreq64.bdl.ulpIoTag32 = 0;
@@ -343,17 +468,20 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
     cmd->ulpTimeout = timeout;
 
     cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
-    cmdiocbq->context1 = ndlp;
-    cmdiocbq->context2 = dd_data;
+    cmdiocbq->context1 = dd_data;
+    cmdiocbq->context2 = cmp;
+    cmdiocbq->context3 = bmp;
     dd_data->type = TYPE_IOCB;
+    dd_data->set_job = job;
     dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-    dd_data->context_un.iocb.set_job = job;
-    dd_data->context_un.iocb.bmp = bmp;
+    dd_data->context_un.iocb.ndlp = ndlp;
+    dd_data->context_un.iocb.rmp = rmp;
+    job->dd_data = dd_data;
 
     if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
         if (lpfc_readl(phba->HCregaddr, &creg_val)) {
             rc = -EIO ;
-            goto free_cmdiocbq;
+            goto free_rmp;
         }
         creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
         writel(creg_val, phba->HCregaddr);
@@ -368,19 +496,18 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
     else
         rc = -EIO;
 
-
     /* iocb failed so cleanup */
-    pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-             job->request_payload.sg_cnt, DMA_TO_DEVICE);
-    pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-             job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-    lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-
-free_cmdiocbq:
-    lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_rmp:
+    lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+    lpfc_free_bsg_buffers(phba, cmp);
 free_bmp:
+    if (bmp->virt)
+        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
     kfree(bmp);
+free_cmdiocbq:
+    lpfc_sli_release_iocbq(phba, cmdiocbq);
 free_ndlp:
     lpfc_nlp_put(ndlp);
 no_ndlp:
@@ -418,67 +545,68 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
     struct fc_bsg_job *job;
     IOCB_t *rsp;
     struct lpfc_nodelist *ndlp;
-    struct lpfc_dmabuf *pbuflist = NULL;
+    struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
     struct fc_bsg_ctels_reply *els_reply;
     uint8_t *rjt_data;
     unsigned long flags;
+    unsigned int rsp_size;
     int rc = 0;
 
-    spin_lock_irqsave(&phba->ct_ev_lock, flags);
     dd_data = cmdiocbq->context1;
-    /* normal completion and timeout crossed paths, already done */
-    if (!dd_data) {
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-        return;
-    }
+    ndlp = dd_data->context_un.iocb.ndlp;
+    cmdiocbq->context1 = ndlp;
 
-    cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
-    if (cmdiocbq->context2 && rspiocbq)
-        memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
-               &rspiocbq->iocb, sizeof(IOCB_t));
+    /* Determine if job has been aborted */
+    spin_lock_irqsave(&phba->ct_ev_lock, flags);
+    job = dd_data->set_job;
+    if (job) {
+        /* Prevent timeout handling from trying to abort job */
+        job->dd_data = NULL;
+    }
+    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-    job = dd_data->context_un.iocb.set_job;
-    cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
-    rspiocbq = dd_data->context_un.iocb.rspiocbq;
     rsp = &rspiocbq->iocb;
-    ndlp = dd_data->context_un.iocb.ndlp;
+    pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+    prsp = (struct lpfc_dmabuf *)pcmd->list.next;
 
-    pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-             job->request_payload.sg_cnt, DMA_TO_DEVICE);
-    pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-             job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+    /* Copy the completed job data or determine the job status if job is
+     * still active
+     */
 
-    if (job->reply->result == -EAGAIN)
-        rc = -EAGAIN;
-    else if (rsp->ulpStatus == IOSTAT_SUCCESS)
-        job->reply->reply_payload_rcv_len =
-            rsp->un.elsreq64.bdl.bdeSize;
-    else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
-        job->reply->reply_payload_rcv_len =
-            sizeof(struct fc_bsg_ctels_reply);
-        /* LS_RJT data returned in word 4 */
-        rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
-        els_reply = &job->reply->reply_data.ctels_reply;
-        els_reply->status = FC_CTELS_STATUS_REJECT;
-        els_reply->rjt_data.action = rjt_data[3];
-        els_reply->rjt_data.reason_code = rjt_data[2];
-        els_reply->rjt_data.reason_explanation = rjt_data[1];
-        els_reply->rjt_data.vendor_unique = rjt_data[0];
-    } else
-        rc = -EIO;
+    if (job) {
+        if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+            rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+            job->reply->reply_payload_rcv_len =
+                sg_copy_from_buffer(job->reply_payload.sg_list,
+                            job->reply_payload.sg_cnt,
+                            prsp->virt,
+                            rsp_size);
+        } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+            job->reply->reply_payload_rcv_len =
+                sizeof(struct fc_bsg_ctels_reply);
+            /* LS_RJT data returned in word 4 */
+            rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+            els_reply = &job->reply->reply_data.ctels_reply;
+            els_reply->status = FC_CTELS_STATUS_REJECT;
+            els_reply->rjt_data.action = rjt_data[3];
+            els_reply->rjt_data.reason_code = rjt_data[2];
+            els_reply->rjt_data.reason_explanation = rjt_data[1];
+            els_reply->rjt_data.vendor_unique = rjt_data[0];
+        } else {
+            rc = -EIO;
+        }
+    }
 
-    pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
-    lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
-    lpfc_sli_release_iocbq(phba, rspiocbq);
-    lpfc_sli_release_iocbq(phba, cmdiocbq);
     lpfc_nlp_put(ndlp);
+    lpfc_els_free_iocb(phba, cmdiocbq);
     kfree(dd_data);
-    /* make error code available to userspace */
-    job->reply->result = rc;
-    job->dd_data = NULL;
-    /* complete the job back to userspace */
-    job->job_done(job);
-    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+    /* Complete the job if the job is still active */
+
+    if (job) {
+        job->reply->result = rc;
+        job->job_done(job);
+    }
     return;
 }
 
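
Note: the handler now copies the ELS response out of the driver-owned prsp
buffer rather than unmapping the job's scatterlist; per the comment added
in lpfc_bsg_rport_els below, the hardware only ever writes driver memory,
so a job freed by a timeout can no longer be the target of a stray DMA.
The success path reduces to (a sketch using only names from the hunk):

    rsp_size = rsp->un.elsreq64.bdl.bdeSize;
    job->reply->reply_payload_rcv_len =
        sg_copy_from_buffer(job->reply_payload.sg_list,
                    job->reply_payload.sg_cnt,
                    prsp->virt, rsp_size);
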
@@ -496,19 +624,8 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
     uint32_t elscmd;
     uint32_t cmdsize;
     uint32_t rspsize;
-    struct lpfc_iocbq *rspiocbq;
     struct lpfc_iocbq *cmdiocbq;
-    IOCB_t *rsp;
     uint16_t rpi = 0;
-    struct lpfc_dmabuf *pcmd;
-    struct lpfc_dmabuf *prsp;
-    struct lpfc_dmabuf *pbuflist = NULL;
-    struct ulp_bde64 *bpl;
-    int request_nseg;
-    int reply_nseg;
-    struct scatterlist *sgel = NULL;
-    int numbde;
-    dma_addr_t busaddr;
     struct bsg_job_data *dd_data;
     uint32_t creg_val;
     int rc = 0;
@@ -516,6 +633,15 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
     /* in case no data is transferred */
     job->reply->reply_payload_rcv_len = 0;
 
+    /* verify the els command is not greater than the
+     * maximum ELS transfer size.
+     */
+
+    if (job->request_payload.payload_len > FCELSSIZE) {
+        rc = -EINVAL;
+        goto no_dd_data;
+    }
+
     /* allocate our bsg tracking structure */
     dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
     if (!dd_data) {
@@ -525,88 +651,51 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
         goto no_dd_data;
     }
 
-    if (!lpfc_nlp_get(ndlp)) {
-        rc = -ENODEV;
-        goto free_dd_data;
-    }
-
     elscmd = job->request->rqst_data.r_els.els_code;
     cmdsize = job->request_payload.payload_len;
     rspsize = job->reply_payload.payload_len;
-    rspiocbq = lpfc_sli_get_iocbq(phba);
-    if (!rspiocbq) {
-        lpfc_nlp_put(ndlp);
-        rc = -ENOMEM;
+
+    if (!lpfc_nlp_get(ndlp)) {
+        rc = -ENODEV;
         goto free_dd_data;
     }
 
-    rsp = &rspiocbq->iocb;
-    rpi = ndlp->nlp_rpi;
+    /* We will use the allocated dma buffers by prep els iocb for command
+     * and response to ensure if the job times out and the request is freed,
+     * we won't be dma into memory that is no longer allocated to for the
+     * request.
+     */
 
     cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
                       ndlp->nlp_DID, elscmd);
     if (!cmdiocbq) {
         rc = -EIO;
-        goto free_rspiocbq;
+        goto release_ndlp;
     }
 
-    /* prep els iocb set context1 to the ndlp, context2 to the command
-     * dmabuf, context3 holds the data dmabuf
-     */
-    pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
-    prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-    lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
-    kfree(pcmd);
-    lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
-    kfree(prsp);
-    cmdiocbq->context2 = NULL;
-
-    pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
-    bpl = (struct ulp_bde64 *) pbuflist->virt;
-
-    request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-                  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-    for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-        busaddr = sg_dma_address(sgel);
-        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-        bpl->tus.f.bdeSize = sg_dma_len(sgel);
-        bpl->tus.w = cpu_to_le32(bpl->tus.w);
-        bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-        bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-        bpl++;
-    }
+    rpi = ndlp->nlp_rpi;
 
-    reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-                job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-    for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-        busaddr = sg_dma_address(sgel);
-        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-        bpl->tus.f.bdeSize = sg_dma_len(sgel);
-        bpl->tus.w = cpu_to_le32(bpl->tus.w);
-        bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-        bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-        bpl++;
-    }
-    cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
-        (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+    /* Transfer the request payload to allocated command dma buffer */
+
+    sg_copy_to_buffer(job->request_payload.sg_list,
+              job->request_payload.sg_cnt,
+              ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+              cmdsize);
+
     if (phba->sli_rev == LPFC_SLI_REV4)
         cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
     else
         cmdiocbq->iocb.ulpContext = rpi;
     cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
-    cmdiocbq->context1 = NULL;
-    cmdiocbq->context2 = NULL;
-
-    cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
     cmdiocbq->context1 = dd_data;
     cmdiocbq->context_un.ndlp = ndlp;
-    cmdiocbq->context2 = rspiocbq;
+    cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
     dd_data->type = TYPE_IOCB;
+    dd_data->set_job = job;
     dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-    dd_data->context_un.iocb.rspiocbq = rspiocbq;
-    dd_data->context_un.iocb.set_job = job;
-    dd_data->context_un.iocb.bmp = NULL;
     dd_data->context_un.iocb.ndlp = ndlp;
+    dd_data->context_un.iocb.rmp = NULL;
+    job->dd_data = dd_data;
 
     if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
         if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -617,8 +706,9 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
         writel(creg_val, phba->HCregaddr);
         readl(phba->HCregaddr); /* flush */
     }
+
     rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-    lpfc_nlp_put(ndlp);
+
     if (rc == IOCB_SUCCESS)
         return 0; /* done for now */
     else if (rc == IOCB_BUSY)
@@ -627,17 +717,12 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
         rc = -EIO;
 
 linkdown_err:
-    pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-             job->request_payload.sg_cnt, DMA_TO_DEVICE);
-    pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-             job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-    lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
-
-    lpfc_sli_release_iocbq(phba, cmdiocbq);
+    cmdiocbq->context1 = ndlp;
+    lpfc_els_free_iocb(phba, cmdiocbq);
 
-free_rspiocbq:
-    lpfc_sli_release_iocbq(phba, rspiocbq);
+release_ndlp:
+    lpfc_nlp_put(ndlp);
 
 free_dd_data:
     kfree(dd_data);
@@ -680,6 +765,7 @@ lpfc_bsg_event_free(struct kref *kref)
         kfree(ed);
     }
 
+    kfree(evt->dd_data);
     kfree(evt);
 }
 
@@ -723,6 +809,7 @@ lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
     evt->req_id = ev_req_id;
     evt->reg_id = ev_reg_id;
     evt->wait_time_stamp = jiffies;
+    evt->dd_data = NULL;
     init_waitqueue_head(&evt->wq);
     kref_init(&evt->kref);
     return evt;
@@ -790,6 +877,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
     struct lpfc_hbq_entry *hbqe;
     struct lpfc_sli_ct_request *ct_req;
     struct fc_bsg_job *job = NULL;
+    struct bsg_job_data *dd_data = NULL;
     unsigned long flags;
     int size = 0;
 
@@ -986,10 +1074,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
     }
 
     list_move(evt->events_to_see.prev, &evt->events_to_get);
-    lpfc_bsg_event_unref(evt);
 
-    job = evt->set_job;
-    evt->set_job = NULL;
+    dd_data = (struct bsg_job_data *)evt->dd_data;
+    job = dd_data->set_job;
+    dd_data->set_job = NULL;
+    lpfc_bsg_event_unref(evt);
     if (job) {
         job->reply->reply_payload_rcv_len = size;
         /* make error code available to userspace */
@@ -1078,14 +1167,6 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
         goto job_error;
     }
 
-    dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
-    if (dd_data == NULL) {
-        lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
-                "2734 Failed allocation of dd_data\n");
-        rc = -ENOMEM;
-        goto job_error;
-    }
-
     event_req = (struct set_ct_event *)
         job->request->rqst_data.h_vendor.vendor_cmd;
     ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
@@ -1095,6 +1176,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
         if (evt->reg_id == event_req->ev_reg_id) {
             lpfc_bsg_event_ref(evt);
             evt->wait_time_stamp = jiffies;
+            dd_data = (struct bsg_job_data *)evt->dd_data;
             break;
         }
     }
@@ -1102,6 +1184,13 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 
     if (&evt->node == &phba->ct_ev_waiters) {
         /* no event waiting struct yet - first call */
+        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+        if (dd_data == NULL) {
+            lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+                    "2734 Failed allocation of dd_data\n");
+            rc = -ENOMEM;
+            goto job_error;
+        }
         evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
                      event_req->ev_req_id);
         if (!evt) {
@@ -1111,7 +1200,10 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
             rc = -ENOMEM;
             goto job_error;
         }
-
+        dd_data->type = TYPE_EVT;
+        dd_data->set_job = NULL;
+        dd_data->context_un.evt = evt;
+        evt->dd_data = (void *)dd_data;
         spin_lock_irqsave(&phba->ct_ev_lock, flags);
         list_add(&evt->node, &phba->ct_ev_waiters);
         lpfc_bsg_event_ref(evt);
@@ -1121,9 +1213,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 
     spin_lock_irqsave(&phba->ct_ev_lock, flags);
     evt->waiting = 1;
-    dd_data->type = TYPE_EVT;
-    dd_data->context_un.evt = evt;
-    evt->set_job = job; /* for unsolicited command */
+    dd_data->set_job = job; /* for unsolicited command */
     job->dd_data = dd_data; /* for fc transport timeout callback*/
     spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
     return 0; /* call job done later */
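
Note: evt lifetime is managed by the kref initialized in
lpfc_bsg_event_new() above, with lpfc_bsg_event_free() as the release
callback (its struct kref argument gives that away). The ref/unref
wrappers used here are presumably just thin kref wrappers along these
lines:

    static void lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
    {
        kref_get(&evt->kref);   /* take a reference */
    }

    static void lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
    {
        kref_put(&evt->kref, lpfc_bsg_event_free);  /* drop; free on last */
    }

which is why the new kfree(evt->dd_data) belongs in lpfc_bsg_event_free:
it runs exactly once, when the last reference drops.
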
@@ -1252,57 +1342,64 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
     struct bsg_job_data *dd_data;
     struct fc_bsg_job *job;
     IOCB_t *rsp;
-    struct lpfc_dmabuf *bmp;
+    struct lpfc_dmabuf *bmp, *cmp;
     struct lpfc_nodelist *ndlp;
     unsigned long flags;
     int rc = 0;
 
+    dd_data = cmdiocbq->context1;
+
+    /* Determine if job has been aborted */
     spin_lock_irqsave(&phba->ct_ev_lock, flags);
-    dd_data = cmdiocbq->context2;
-    /* normal completion and timeout crossed paths, already done */
-    if (!dd_data) {
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-        return;
+    job = dd_data->set_job;
+    if (job) {
+        /* Prevent timeout handling from trying to abort job */
+        job->dd_data = NULL;
     }
+    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-    job = dd_data->context_un.iocb.set_job;
-    bmp = dd_data->context_un.iocb.bmp;
-    rsp = &rspiocbq->iocb;
     ndlp = dd_data->context_un.iocb.ndlp;
+    cmp = cmdiocbq->context2;
+    bmp = cmdiocbq->context3;
+    rsp = &rspiocbq->iocb;
 
-    pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-             job->request_payload.sg_cnt, DMA_TO_DEVICE);
+    /* Copy the completed job data or set the error status */
 
-    if (rsp->ulpStatus) {
-        if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-            switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-            case IOERR_SEQUENCE_TIMEOUT:
-                rc = -ETIMEDOUT;
-                break;
-            case IOERR_INVALID_RPI:
-                rc = -EFAULT;
-                break;
-            default:
-                rc = -EACCES;
-                break;
-            }
-        } else
-            rc = -EACCES;
-    } else
-        job->reply->reply_payload_rcv_len =
-            rsp->un.genreq64.bdl.bdeSize;
+    if (job) {
+        if (rsp->ulpStatus) {
+            if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+                switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+                case IOERR_SEQUENCE_TIMEOUT:
+                    rc = -ETIMEDOUT;
+                    break;
+                case IOERR_INVALID_RPI:
+                    rc = -EFAULT;
+                    break;
+                default:
+                    rc = -EACCES;
+                    break;
+                }
+            } else {
+                rc = -EACCES;
+            }
+        } else {
+            job->reply->reply_payload_rcv_len = 0;
+        }
+    }
 
+    lpfc_free_bsg_buffers(phba, cmp);
     lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+    kfree(bmp);
     lpfc_sli_release_iocbq(phba, cmdiocbq);
     lpfc_nlp_put(ndlp);
-    kfree(bmp);
     kfree(dd_data);
-    /* make error code available to userspace */
-    job->reply->result = rc;
-    job->dd_data = NULL;
-    /* complete the job back to userspace */
-    job->job_done(job);
-    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+    /* Complete the job if the job is still active */
+
+    if (job) {
+        job->reply->result = rc;
+        job->job_done(job);
+    }
     return;
 }
 
@@ -1316,7 +1413,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
  **/
 static int
 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
-          struct lpfc_dmabuf *bmp, int num_entry)
+          struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
+          int num_entry)
 {
     IOCB_t *icmd;
     struct lpfc_iocbq *ctiocb = NULL;
@@ -1377,7 +1475,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 
     /* Check if the ndlp is active */
     if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-        rc = -IOCB_ERROR;
+        rc = IOCB_ERROR;
         goto issue_ct_rsp_exit;
     }
 
@@ -1385,7 +1483,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
      * we respond
      */
     if (!lpfc_nlp_get(ndlp)) {
-        rc = -IOCB_ERROR;
+        rc = IOCB_ERROR;
         goto issue_ct_rsp_exit;
     }
 
@@ -1407,17 +1505,17 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
     ctiocb->iocb_cmpl = NULL;
     ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
     ctiocb->vport = phba->pport;
+    ctiocb->context1 = dd_data;
+    ctiocb->context2 = cmp;
     ctiocb->context3 = bmp;
-
     ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
-    ctiocb->context2 = dd_data;
-    ctiocb->context1 = ndlp;
+
     dd_data->type = TYPE_IOCB;
+    dd_data->set_job = job;
     dd_data->context_un.iocb.cmdiocbq = ctiocb;
-    dd_data->context_un.iocb.rspiocbq = NULL;
-    dd_data->context_un.iocb.set_job = job;
-    dd_data->context_un.iocb.bmp = bmp;
     dd_data->context_un.iocb.ndlp = ndlp;
+    dd_data->context_un.iocb.rmp = NULL;
+    job->dd_data = dd_data;
 
     if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
         if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -1454,11 +1552,8 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
     struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
         job->request->rqst_data.h_vendor.vendor_cmd;
     struct ulp_bde64 *bpl;
-    struct lpfc_dmabuf *bmp = NULL;
-    struct scatterlist *sgel = NULL;
-    int request_nseg;
-    int numbde;
-    dma_addr_t busaddr;
+    struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
+    int bpl_entries;
     uint32_t tag = mgmt_resp->tag;
     unsigned long reqbfrcnt =
             (unsigned long)job->request_payload.payload_len;
@@ -1486,30 +1581,28 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
 
     INIT_LIST_HEAD(&bmp->list);
     bpl = (struct ulp_bde64 *) bmp->virt;
-    request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-                  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-    for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-        busaddr = sg_dma_address(sgel);
-        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-        bpl->tus.f.bdeSize = sg_dma_len(sgel);
-        bpl->tus.w = cpu_to_le32(bpl->tus.w);
-        bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-        bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-        bpl++;
+    bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
+    cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+                     1, bpl, &bpl_entries);
+    if (!cmp) {
+        rc = -ENOMEM;
+        goto send_mgmt_rsp_free_bmp;
     }
+    lpfc_bsg_copy_data(cmp, &job->request_payload,
+               job->request_payload.payload_len, 1);
 
-    rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
+    rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
 
     if (rc == IOCB_SUCCESS)
         return 0; /* done for now */
 
-    /* TBD need to handle a timeout */
-    pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-             job->request_payload.sg_cnt, DMA_TO_DEVICE);
     rc = -EACCES;
-    lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+    lpfc_free_bsg_buffers(phba, cmp);
 
 send_mgmt_rsp_free_bmp:
+    if (bmp->virt)
+        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
     kfree(bmp);
 send_mgmt_rsp_exit:
     /* make error code available to userspace */
@@ -1559,7 +1652,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
         scsi_block_requests(shost);
     }
 
-    while (pring->txcmplq_cnt) {
+    while (!list_empty(&pring->txcmplq)) {
         if (i++ > 500)  /* wait up to 5 seconds */
             break;
         msleep(10);
@@ -3193,13 +3286,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
     unsigned long flags;
     uint8_t *pmb, *pmb_buf;
 
-    spin_lock_irqsave(&phba->ct_ev_lock, flags);
     dd_data = pmboxq->context1;
-    /* job already timed out? */
-    if (!dd_data) {
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-        return;
-    }
 
     /*
      * The outgoing buffer is readily referred from the dma buffer,
@@ -3209,29 +3296,33 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
     pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
     memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
 
-    job = dd_data->context_un.mbox.set_job;
+    /* Determine if job has been aborted */
+
+    spin_lock_irqsave(&phba->ct_ev_lock, flags);
+    job = dd_data->set_job;
+    if (job) {
+        /* Prevent timeout handling from trying to abort job */
+        job->dd_data = NULL;
+    }
+    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+    /* Copy the mailbox data to the job if it is still active */
+
     if (job) {
         size = job->reply_payload.payload_len;
         job->reply->reply_payload_rcv_len =
             sg_copy_from_buffer(job->reply_payload.sg_list,
                         job->reply_payload.sg_cnt,
                         pmb_buf, size);
-        /* need to hold the lock until we set job->dd_data to NULL
-         * to hold off the timeout handler returning to the mid-layer
-         * while we are still processing the job.
-         */
-        job->dd_data = NULL;
-        dd_data->context_un.mbox.set_job = NULL;
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-    } else {
-        dd_data->context_un.mbox.set_job = NULL;
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
     }
 
+    dd_data->set_job = NULL;
     mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
     lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
     kfree(dd_data);
 
+    /* Complete the job if the job is still active */
+
     if (job) {
         job->reply->result = 0;
         job->job_done(job);
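
Note: with the lock-protected job check done up front, the mailbox
completion can copy the reply outside any lock. sg_copy_from_buffer()
linearizes the copy into the BSG reply scatterlist and returns the number
of bytes actually copied, so the success path is just (a sketch using
names from the hunk):

    /* pmb_buf already holds the byte-copied MAILBOX_t image, so this
     * scatterlist copy needs no further synchronization with hardware.
     */
    size = job->reply_payload.payload_len;
    job->reply->reply_payload_rcv_len =
        sg_copy_from_buffer(job->reply_payload.sg_list,
                    job->reply_payload.sg_cnt,
                    pmb_buf, size);
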
@@ -3377,19 +3468,22 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
     struct lpfc_sli_config_mbox *sli_cfg_mbx;
     uint8_t *pmbx;
 
-    spin_lock_irqsave(&phba->ct_ev_lock, flags);
     dd_data = pmboxq->context1;
-    /* has the job already timed out? */
-    if (!dd_data) {
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-        job = NULL;
-        goto job_done_out;
+
+    /* Determine if job has been aborted */
+    spin_lock_irqsave(&phba->ct_ev_lock, flags);
+    job = dd_data->set_job;
+    if (job) {
+        /* Prevent timeout handling from trying to abort job */
+        job->dd_data = NULL;
     }
+    spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
     /*
      * The outgoing buffer is readily referred from the dma buffer,
      * just need to get header part from mailboxq structure.
      */
+
     pmb = (uint8_t *)&pmboxq->u.mb;
     pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
     /* Copy the byte swapped response mailbox back to the user */
@@ -3406,21 +3500,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
             sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
     }
 
-    job = dd_data->context_un.mbox.set_job;
+    /* Complete the job if the job is still active */
+
     if (job) {
         size = job->reply_payload.payload_len;
         job->reply->reply_payload_rcv_len =
             sg_copy_from_buffer(job->reply_payload.sg_list,
                         job->reply_payload.sg_cnt,
                         pmb_buf, size);
+
         /* result for successful */
         job->reply->result = 0;
-        job->dd_data = NULL;
-        /* need to hold the lock util we set job->dd_data to NULL
-         * to hold off the timeout handler from midlayer to take
-         * any action.
-         */
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
         lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
                 "2937 SLI_CONFIG ext-buffer maibox command "
                 "(x%x/x%x) complete bsg job done, bsize:%d\n",
@@ -3431,20 +3522,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
                 phba->mbox_ext_buf_ctx.mboxType,
                 dma_ebuf, sta_pos_addr,
                 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
-    } else
-        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
-job_done_out:
-    if (!job)
+    } else {
         lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
                 "2938 SLI_CONFIG ext-buffer maibox "
                 "command (x%x/x%x) failure, rc:x%x\n",
                 phba->mbox_ext_buf_ctx.nembType,
                 phba->mbox_ext_buf_ctx.mboxType, rc);
+    }
+
+
     /* state change */
     phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
     kfree(dd_data);
-
     return job;
 }
 
@@ -3461,8 +3550,10 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
     struct fc_bsg_job *job;
 
+    job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
     /* handle the BSG job with mailbox command */
-    if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+    if (!job)
         pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
 
     lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3470,15 +3561,13 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3470 "complete, ctxState:x%x, mbxStatus:x%x\n", 3561 "complete, ctxState:x%x, mbxStatus:x%x\n",
3471 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3562 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3472 3563
3473 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3474
3475 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) 3564 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3476 lpfc_bsg_mbox_ext_session_reset(phba); 3565 lpfc_bsg_mbox_ext_session_reset(phba);
3477 3566
3478 /* free base driver mailbox structure memory */ 3567 /* free base driver mailbox structure memory */
3479 mempool_free(pmboxq, phba->mbox_mem_pool); 3568 mempool_free(pmboxq, phba->mbox_mem_pool);
3480 3569
3481 /* complete the bsg job if we have it */ 3570 /* if the job is still active, call job done */
3482 if (job) 3571 if (job)
3483 job->job_done(job); 3572 job->job_done(job);
3484 3573
@@ -3498,8 +3587,10 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
     struct fc_bsg_job *job;
 
+    job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
     /* handle the BSG job with the mailbox command */
-    if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+    if (!job)
         pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
 
     lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3507,13 +3598,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3507 "complete, ctxState:x%x, mbxStatus:x%x\n", 3598 "complete, ctxState:x%x, mbxStatus:x%x\n",
3508 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3599 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3509 3600
3510 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3511
3512 /* free all memory, including dma buffers */ 3601 /* free all memory, including dma buffers */
3513 mempool_free(pmboxq, phba->mbox_mem_pool); 3602 mempool_free(pmboxq, phba->mbox_mem_pool);
3514 lpfc_bsg_mbox_ext_session_reset(phba); 3603 lpfc_bsg_mbox_ext_session_reset(phba);
3515 3604
3516 /* complete the bsg job if we have it */ 3605 /* if the job is still active, call job done */
3517 if (job) 3606 if (job)
3518 job->job_done(job); 3607 job->job_done(job);
3519 3608
@@ -3759,9 +3848,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
     /* context fields to callback function */
     pmboxq->context1 = dd_data;
     dd_data->type = TYPE_MBOX;
+    dd_data->set_job = job;
     dd_data->context_un.mbox.pmboxq = pmboxq;
     dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
-    dd_data->context_un.mbox.set_job = job;
     job->dd_data = dd_data;
 
     /* state change */
@@ -3928,14 +4017,14 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
     /* context fields to callback function */
     pmboxq->context1 = dd_data;
     dd_data->type = TYPE_MBOX;
+    dd_data->set_job = job;
     dd_data->context_un.mbox.pmboxq = pmboxq;
     dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
-    dd_data->context_un.mbox.set_job = job;
     job->dd_data = dd_data;
 
     /* state change */
-    phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
 
+    phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
     rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
     if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
         lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3951,6 +4040,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3951 } 4040 }
3952 4041
3953 /* wait for additional external buffers */ 4042 /* wait for additional external buffers */
4043
3954 job->reply->result = 0; 4044 job->reply->result = 0;
3955 job->job_done(job); 4045 job->job_done(job);
3956 return SLI_CONFIG_HANDLED; 4046 return SLI_CONFIG_HANDLED;
@@ -4268,9 +4358,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4268 /* context fields to callback function */ 4358 /* context fields to callback function */
4269 pmboxq->context1 = dd_data; 4359 pmboxq->context1 = dd_data;
4270 dd_data->type = TYPE_MBOX; 4360 dd_data->type = TYPE_MBOX;
4361 dd_data->set_job = job;
4271 dd_data->context_un.mbox.pmboxq = pmboxq; 4362 dd_data->context_un.mbox.pmboxq = pmboxq;
4272 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf; 4363 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4273 dd_data->context_un.mbox.set_job = job;
4274 job->dd_data = dd_data; 4364 job->dd_data = dd_data;
4275 4365
4276 /* state change */ 4366 /* state change */
@@ -4455,7 +4545,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4455 uint8_t *from; 4545 uint8_t *from;
4456 uint32_t size; 4546 uint32_t size;
4457 4547
4458
4459 /* in case no data is transferred */ 4548 /* in case no data is transferred */
4460 job->reply->reply_payload_rcv_len = 0; 4549 job->reply->reply_payload_rcv_len = 0;
4461 4550
@@ -4681,9 +4770,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4681 /* setup context field to pass wait_queue pointer to wake function */ 4770 /* setup context field to pass wait_queue pointer to wake function */
4682 pmboxq->context1 = dd_data; 4771 pmboxq->context1 = dd_data;
4683 dd_data->type = TYPE_MBOX; 4772 dd_data->type = TYPE_MBOX;
4773 dd_data->set_job = job;
4684 dd_data->context_un.mbox.pmboxq = pmboxq; 4774 dd_data->context_un.mbox.pmboxq = pmboxq;
4685 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 4775 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4686 dd_data->context_un.mbox.set_job = job;
4687 dd_data->context_un.mbox.ext = ext; 4776 dd_data->context_un.mbox.ext = ext;
4688 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4777 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4689 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 4778 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
@@ -4797,75 +4886,79 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4797 struct bsg_job_data *dd_data; 4886 struct bsg_job_data *dd_data;
4798 struct fc_bsg_job *job; 4887 struct fc_bsg_job *job;
4799 IOCB_t *rsp; 4888 IOCB_t *rsp;
4800 struct lpfc_dmabuf *bmp; 4889 struct lpfc_dmabuf *bmp, *cmp, *rmp;
4801 struct lpfc_bsg_menlo *menlo; 4890 struct lpfc_bsg_menlo *menlo;
4802 unsigned long flags; 4891 unsigned long flags;
4803 struct menlo_response *menlo_resp; 4892 struct menlo_response *menlo_resp;
4893 unsigned int rsp_size;
4804 int rc = 0; 4894 int rc = 0;
4805 4895
4806 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4807 dd_data = cmdiocbq->context1; 4896 dd_data = cmdiocbq->context1;
4808 if (!dd_data) { 4897 cmp = cmdiocbq->context2;
4809 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4898 bmp = cmdiocbq->context3;
4810 return;
4811 }
4812
4813 menlo = &dd_data->context_un.menlo; 4899 menlo = &dd_data->context_un.menlo;
4814 job = menlo->set_job; 4900 rmp = menlo->rmp;
4815 job->dd_data = NULL; /* so timeout handler does not reply */
4816
4817 spin_lock(&phba->hbalock);
4818 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4819 if (cmdiocbq->context2 && rspiocbq)
4820 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4821 &rspiocbq->iocb, sizeof(IOCB_t));
4822 spin_unlock(&phba->hbalock);
4823
4824 bmp = menlo->bmp;
4825 rspiocbq = menlo->rspiocbq;
4826 rsp = &rspiocbq->iocb; 4901 rsp = &rspiocbq->iocb;
4827 4902
4828 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 4903 /* Determine if job has been aborted */
4829 job->request_payload.sg_cnt, DMA_TO_DEVICE); 4904 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4830 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 4905 job = dd_data->set_job;
4831 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 4906 if (job) {
4907 /* Prevent timeout handling from trying to abort job */
4908 job->dd_data = NULL;
4909 }
4910 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4911
4912 /* Copy the job data or set the failing status for the job */
4832 4913
4833 /* always return the xri, this would be used in the case 4914 if (job) {
4834 * of a menlo download to allow the data to be sent as a continuation 4915 /* always return the xri, this would be used in the case
4835 * of the exchange. 4916 * of a menlo download to allow the data to be sent as a
4836 */ 4917 * continuation of the exchange.
4837 menlo_resp = (struct menlo_response *) 4918 */
4838 job->reply->reply_data.vendor_reply.vendor_rsp; 4919
4839 menlo_resp->xri = rsp->ulpContext; 4920 menlo_resp = (struct menlo_response *)
4840 if (rsp->ulpStatus) { 4921 job->reply->reply_data.vendor_reply.vendor_rsp;
4841 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 4922 menlo_resp->xri = rsp->ulpContext;
4842 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { 4923 if (rsp->ulpStatus) {
4843 case IOERR_SEQUENCE_TIMEOUT: 4924 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4844 rc = -ETIMEDOUT; 4925 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
4845 break; 4926 case IOERR_SEQUENCE_TIMEOUT:
4846 case IOERR_INVALID_RPI: 4927 rc = -ETIMEDOUT;
4847 rc = -EFAULT; 4928 break;
4848 break; 4929 case IOERR_INVALID_RPI:
4849 default: 4930 rc = -EFAULT;
4931 break;
4932 default:
4933 rc = -EACCES;
4934 break;
4935 }
4936 } else {
4850 rc = -EACCES; 4937 rc = -EACCES;
4851 break;
4852 } 4938 }
4853 } else 4939 } else {
4854 rc = -EACCES; 4940 rsp_size = rsp->un.genreq64.bdl.bdeSize;
4855 } else 4941 job->reply->reply_payload_rcv_len =
4856 job->reply->reply_payload_rcv_len = 4942 lpfc_bsg_copy_data(rmp, &job->reply_payload,
4857 rsp->un.genreq64.bdl.bdeSize; 4943 rsp_size, 0);
4944 }
4945
4946 }
4858 4947
4859 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4860 lpfc_sli_release_iocbq(phba, rspiocbq);
4861 lpfc_sli_release_iocbq(phba, cmdiocbq); 4948 lpfc_sli_release_iocbq(phba, cmdiocbq);
4949 lpfc_free_bsg_buffers(phba, cmp);
4950 lpfc_free_bsg_buffers(phba, rmp);
4951 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4862 kfree(bmp); 4952 kfree(bmp);
4863 kfree(dd_data); 4953 kfree(dd_data);
4864 /* make error code available to userspace */ 4954
4865 job->reply->result = rc; 4955 /* Complete the job if active */
4866 /* complete the job back to userspace */ 4956
4867 job->job_done(job); 4957 if (job) {
4868 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4958 job->reply->result = rc;
4959 job->job_done(job);
4960 }
4961
4869 return; 4962 return;
4870} 4963}
4871 4964
@@ -4883,17 +4976,14 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
4883{ 4976{
4884 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 4977 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4885 struct lpfc_hba *phba = vport->phba; 4978 struct lpfc_hba *phba = vport->phba;
4886 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 4979 struct lpfc_iocbq *cmdiocbq;
4887 IOCB_t *cmd, *rsp; 4980 IOCB_t *cmd;
4888 int rc = 0; 4981 int rc = 0;
4889 struct menlo_command *menlo_cmd; 4982 struct menlo_command *menlo_cmd;
4890 struct menlo_response *menlo_resp; 4983 struct menlo_response *menlo_resp;
4891 struct lpfc_dmabuf *bmp = NULL; 4984 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
4892 int request_nseg; 4985 int request_nseg;
4893 int reply_nseg; 4986 int reply_nseg;
4894 struct scatterlist *sgel = NULL;
4895 int numbde;
4896 dma_addr_t busaddr;
4897 struct bsg_job_data *dd_data; 4987 struct bsg_job_data *dd_data;
4898 struct ulp_bde64 *bpl = NULL; 4988 struct ulp_bde64 *bpl = NULL;
4899 4989
@@ -4948,50 +5038,38 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
4948 goto free_dd; 5038 goto free_dd;
4949 } 5039 }
4950 5040
4951 cmdiocbq = lpfc_sli_get_iocbq(phba); 5041 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4952 if (!cmdiocbq) { 5042 if (!bmp->virt) {
4953 rc = -ENOMEM; 5043 rc = -ENOMEM;
4954 goto free_bmp; 5044 goto free_bmp;
4955 } 5045 }
4956 5046
4957 rspiocbq = lpfc_sli_get_iocbq(phba); 5047 INIT_LIST_HEAD(&bmp->list);
4958 if (!rspiocbq) {
4959 rc = -ENOMEM;
4960 goto free_cmdiocbq;
4961 }
4962
4963 rsp = &rspiocbq->iocb;
4964 5048
4965 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 5049 bpl = (struct ulp_bde64 *)bmp->virt;
4966 if (!bmp->virt) { 5050 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5051 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5052 1, bpl, &request_nseg);
5053 if (!cmp) {
4967 rc = -ENOMEM; 5054 rc = -ENOMEM;
4968 goto free_rspiocbq; 5055 goto free_bmp;
4969 } 5056 }
5057 lpfc_bsg_copy_data(cmp, &job->request_payload,
5058 job->request_payload.payload_len, 1);
4970 5059
4971 INIT_LIST_HEAD(&bmp->list); 5060 bpl += request_nseg;
4972 bpl = (struct ulp_bde64 *) bmp->virt; 5061 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
4973 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 5062 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
4974 job->request_payload.sg_cnt, DMA_TO_DEVICE); 5063 bpl, &reply_nseg);
4975 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 5064 if (!rmp) {
4976 busaddr = sg_dma_address(sgel); 5065 rc = -ENOMEM;
4977 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 5066 goto free_cmp;
4978 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4979 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4980 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4981 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4982 bpl++;
4983 } 5067 }
4984 5068
4985 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 5069 cmdiocbq = lpfc_sli_get_iocbq(phba);
4986 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 5070 if (!cmdiocbq) {
4987 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 5071 rc = -ENOMEM;
4988 busaddr = sg_dma_address(sgel); 5072 goto free_rmp;
4989 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4990 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4991 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4992 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4993 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4994 bpl++;
4995 } 5073 }
4996 5074
4997 cmd = &cmdiocbq->iocb; 5075 cmd = &cmdiocbq->iocb;
@@ -5013,11 +5091,10 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
5013 cmdiocbq->vport = phba->pport; 5091 cmdiocbq->vport = phba->pport;
5014 /* We want the firmware to timeout before we do */ 5092 /* We want the firmware to timeout before we do */
5015 cmd->ulpTimeout = MENLO_TIMEOUT - 5; 5093 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5016 cmdiocbq->context3 = bmp;
5017 cmdiocbq->context2 = rspiocbq;
5018 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; 5094 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5019 cmdiocbq->context1 = dd_data; 5095 cmdiocbq->context1 = dd_data;
5020 cmdiocbq->context2 = rspiocbq; 5096 cmdiocbq->context2 = cmp;
5097 cmdiocbq->context3 = bmp;
5021 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { 5098 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5022 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 5099 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5023 cmd->ulpPU = MENLO_PU; /* 3 */ 5100 cmd->ulpPU = MENLO_PU; /* 3 */
@@ -5031,29 +5108,25 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
5031 } 5108 }
5032 5109
5033 dd_data->type = TYPE_MENLO; 5110 dd_data->type = TYPE_MENLO;
5111 dd_data->set_job = job;
5034 dd_data->context_un.menlo.cmdiocbq = cmdiocbq; 5112 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5035 dd_data->context_un.menlo.rspiocbq = rspiocbq; 5113 dd_data->context_un.menlo.rmp = rmp;
5036 dd_data->context_un.menlo.set_job = job; 5114 job->dd_data = dd_data;
5037 dd_data->context_un.menlo.bmp = bmp;
5038 5115
5039 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 5116 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5040 MENLO_TIMEOUT - 5); 5117 MENLO_TIMEOUT - 5);
5041 if (rc == IOCB_SUCCESS) 5118 if (rc == IOCB_SUCCESS)
5042 return 0; /* done for now */ 5119 return 0; /* done for now */
5043 5120
5044 /* iocb failed so cleanup */
5045 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
5046 job->request_payload.sg_cnt, DMA_TO_DEVICE);
5047 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
5048 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
5049
5050 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5051
5052free_rspiocbq:
5053 lpfc_sli_release_iocbq(phba, rspiocbq);
5054free_cmdiocbq:
5055 lpfc_sli_release_iocbq(phba, cmdiocbq); 5121 lpfc_sli_release_iocbq(phba, cmdiocbq);
5122
5123free_rmp:
5124 lpfc_free_bsg_buffers(phba, rmp);
5125free_cmp:
5126 lpfc_free_bsg_buffers(phba, cmp);
5056free_bmp: 5127free_bmp:
5128 if (bmp->virt)
5129 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5057 kfree(bmp); 5130 kfree(bmp);
5058free_dd: 5131free_dd:
5059 kfree(dd_data); 5132 kfree(dd_data);
@@ -5162,70 +5235,94 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5162 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 5235 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
5163 struct lpfc_hba *phba = vport->phba; 5236 struct lpfc_hba *phba = vport->phba;
5164 struct lpfc_iocbq *cmdiocb; 5237 struct lpfc_iocbq *cmdiocb;
5165 struct lpfc_bsg_event *evt;
5166 struct lpfc_bsg_iocb *iocb;
5167 struct lpfc_bsg_mbox *mbox;
5168 struct lpfc_bsg_menlo *menlo;
5169 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 5238 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5170 struct bsg_job_data *dd_data; 5239 struct bsg_job_data *dd_data;
5171 unsigned long flags; 5240 unsigned long flags;
5241 int rc = 0;
5242 LIST_HEAD(completions);
5243 struct lpfc_iocbq *check_iocb, *next_iocb;
5244
5245 /* if job's driver data is NULL, the command completed or is in the
5246 * process of completing. In this case, return a status to the request
5247 * so the timeout is retried. This avoids double completion issues,
5248 * and the request will be pulled off the timer queue when the
5249 * command's completion handler executes. Otherwise, prevent the
5250 * command's completion handler from executing the job done callback
5251 * and continue processing to abort the outstanding command.
5252 */
5172 5253
5173 spin_lock_irqsave(&phba->ct_ev_lock, flags); 5254 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5174 dd_data = (struct bsg_job_data *)job->dd_data; 5255 dd_data = (struct bsg_job_data *)job->dd_data;
5175 /* timeout and completion crossed paths if no dd_data */ 5256 if (dd_data) {
5176 if (!dd_data) { 5257 dd_data->set_job = NULL;
5258 job->dd_data = NULL;
5259 } else {
5177 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5260 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5178 return 0; 5261 return -EAGAIN;
5179 } 5262 }
5180 5263
5181 switch (dd_data->type) { 5264 switch (dd_data->type) {
5182 case TYPE_IOCB: 5265 case TYPE_IOCB:
5183 iocb = &dd_data->context_un.iocb; 5266 /* Check to see if IOCB was issued to the port or not. If not,
5184 cmdiocb = iocb->cmdiocbq; 5267 * remove it from the txq and call cancel iocbs.
5185 /* hint to completion handler that the job timed out */ 5268 * Otherwise, call abort iotag.
5186 job->reply->result = -EAGAIN; 5269 */
5187 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5270
5188 /* this will call our completion handler */ 5271 cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5189 spin_lock_irq(&phba->hbalock); 5272 spin_lock_irq(&phba->hbalock);
5190 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5273 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5274 list) {
5275 if (check_iocb == cmdiocb) {
5276 list_move_tail(&check_iocb->list, &completions);
5277 break;
5278 }
5279 }
5280 if (list_empty(&completions))
5281 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5191 spin_unlock_irq(&phba->hbalock); 5282 spin_unlock_irq(&phba->hbalock);
5283 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5284 if (!list_empty(&completions)) {
5285 lpfc_sli_cancel_iocbs(phba, &completions,
5286 IOSTAT_LOCAL_REJECT,
5287 IOERR_SLI_ABORTED);
5288 }
5192 break; 5289 break;
5290
5193 case TYPE_EVT: 5291 case TYPE_EVT:
5194 evt = dd_data->context_un.evt;
5195 /* this event has no job anymore */
5196 evt->set_job = NULL;
5197 job->dd_data = NULL;
5198 job->reply->reply_payload_rcv_len = 0;
5199 /* Return -EAGAIN which is our way of signallying the
5200 * app to retry.
5201 */
5202 job->reply->result = -EAGAIN;
5203 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5292 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5204 job->job_done(job);
5205 break; 5293 break;
5294
5206 case TYPE_MBOX: 5295 case TYPE_MBOX:
5207 mbox = &dd_data->context_un.mbox; 5296 /* Update the ext buf ctx state if needed */
5208 /* this mbox has no job anymore */ 5297
5209 mbox->set_job = NULL;
5210 job->dd_data = NULL;
5211 job->reply->reply_payload_rcv_len = 0;
5212 job->reply->result = -EAGAIN;
5213 /* the mbox completion handler can now be run */
5214 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5215 job->job_done(job);
5216 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) 5298 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5217 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; 5299 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5300 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5218 break; 5301 break;
5219 case TYPE_MENLO: 5302 case TYPE_MENLO:
5220 menlo = &dd_data->context_un.menlo; 5303 /* Check to see if IOCB was issued to the port or not. If not,
5221 cmdiocb = menlo->cmdiocbq; 5304 * remove it from the txq and call cancel iocbs.
5222 /* hint to completion handler that the job timed out */ 5305 * Otherwise, call abort iotag.
5223 job->reply->result = -EAGAIN; 5306 */
5224 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5307
5225 /* this will call our completion handler */ 5308 cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5226 spin_lock_irq(&phba->hbalock); 5309 spin_lock_irq(&phba->hbalock);
5227 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5310 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5311 list) {
5312 if (check_iocb == cmdiocb) {
5313 list_move_tail(&check_iocb->list, &completions);
5314 break;
5315 }
5316 }
5317 if (list_empty(&completions))
5318 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5228 spin_unlock_irq(&phba->hbalock); 5319 spin_unlock_irq(&phba->hbalock);
5320 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5321 if (!list_empty(&completions)) {
5322 lpfc_sli_cancel_iocbs(phba, &completions,
5323 IOSTAT_LOCAL_REJECT,
5324 IOERR_SLI_ABORTED);
5325 }
5229 break; 5326 break;
5230 default: 5327 default:
5231 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5328 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -5236,5 +5333,5 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
5236 * otherwise an error message will be displayed on the console 5333 * otherwise an error message will be displayed on the console
5237 * so always return success (zero) 5334 * so return the completion status
5238 */ 5335 */
5239 return 0; 5336 return rc;
5240} 5337}
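
The reworked timeout handler treats TYPE_IOCB and TYPE_MENLO identically: if the command is still sitting on the txq it was never issued to the port and can be completed locally; otherwise an abort must go through the port. A condensed sketch of that pattern, using only calls that appear in the hunk above (the helper name is illustrative; the driver open-codes this inside lpfc_bsg_timeout()):

/*
 * Condensed sketch of the txq-scan-then-abort pattern; assumes the
 * usual lpfc headers. find_and_cancel_or_abort() is a hypothetical
 * name, not a driver function.
 */
static void find_and_cancel_or_abort(struct lpfc_hba *phba,
				     struct lpfc_sli_ring *pring,
				     struct lpfc_iocbq *cmdiocb)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb == cmdiocb) {
			/* never issued to the port: complete it locally */
			list_move_tail(&iocb->list, &completions);
			break;
		}
	}
	if (list_empty(&completions))
		/* already issued: ask the port to abort the exchange */
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
	spin_unlock_irq(&phba->hbalock);

	if (!list_empty(&completions))
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
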
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 76ca65dae781..7631893ae005 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -106,6 +106,7 @@ void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
106void lpfc_cleanup(struct lpfc_vport *); 106void lpfc_cleanup(struct lpfc_vport *);
107void lpfc_disc_timeout(unsigned long); 107void lpfc_disc_timeout(unsigned long);
108 108
109int lpfc_unregister_fcf_prep(struct lpfc_hba *);
109struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 110struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
110struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 111struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
111void lpfc_worker_wake_up(struct lpfc_hba *); 112void lpfc_worker_wake_up(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 08d156a9094f..bbed8471bf0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -484,6 +484,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
484 vport->port_state = LPFC_FABRIC_CFG_LINK; 484 vport->port_state = LPFC_FABRIC_CFG_LINK;
485 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); 485 memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
486 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 486 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
487
487 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 488 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
488 mboxq->vport = vport; 489 mboxq->vport = vport;
489 mboxq->context1 = dmabuf; 490 mboxq->context1 = dmabuf;
@@ -700,6 +701,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
700 } 701 }
701 } 702 }
702 703
704 /*
705 * For FC we need some special processing because of the SLI
706 * Port's default Common Service Parameter settings.
707 */
708 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
709 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
710 if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
711 lpfc_unregister_fcf_prep(phba);
712
713 /* This should just update the VFI CSPs */
714 if (vport->fc_flag & FC_VFI_REGISTERED)
715 lpfc_issue_reg_vfi(vport);
716 }
717
703 if (fabric_param_changed && 718 if (fabric_param_changed &&
704 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 719 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
705 720
@@ -6225,7 +6240,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
6225 spin_unlock_irq(&phba->hbalock); 6240 spin_unlock_irq(&phba->hbalock);
6226 } 6241 }
6227 6242
6228 if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) 6243 if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
6229 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 6244 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
6230} 6245}
6231 6246
@@ -6279,7 +6294,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
6279 continue; 6294 continue;
6280 6295
6281 list_move_tail(&piocb->list, &completions); 6296 list_move_tail(&piocb->list, &completions);
6282 pring->txq_cnt--;
6283 } 6297 }
6284 6298
6285 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6299 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@@ -6339,7 +6353,6 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
6339 cmd->ulpCommand == CMD_ABORT_XRI_CN) 6353 cmd->ulpCommand == CMD_ABORT_XRI_CN)
6340 continue; 6354 continue;
6341 list_move_tail(&piocb->list, &completions); 6355 list_move_tail(&piocb->list, &completions);
6342 pring->txq_cnt--;
6343 } 6356 }
6344 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 6357 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
6345 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 6358 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
@@ -8065,7 +8078,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
8065 rxid, 1); 8078 rxid, 1);
8066 8079
8067 /* Check if TXQ queue needs to be serviced */ 8080 /* Check if TXQ queue needs to be serviced */
8068 if (pring->txq_cnt) 8081 if (!(list_empty(&pring->txq)))
8069 lpfc_worker_wake_up(phba); 8082 lpfc_worker_wake_up(phba);
8070 return; 8083 return;
8071 } 8084 }
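
A recurring theme in this series is the retirement of the cached txq_cnt/txcmplq_cnt fields: occupancy is now read straight off the lists, so there is no shadow counter to drift out of sync with the list itself. A minimal sketch of the replacement idiom, assuming the ring lists are protected by phba->hbalock as elsewhere in the driver (the helper name is illustrative):

static bool txq_has_work(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	bool busy;

	spin_lock_irq(&phba->hbalock);
	busy = !list_empty(&pring->txq);	/* was: pring->txq_cnt != 0 */
	spin_unlock_irq(&phba->hbalock);

	return busy;
}
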
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bfda18467ee6..326e05a65a73 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -691,12 +691,15 @@ lpfc_work_done(struct lpfc_hba *phba)
691 /* Set the lpfc data pending flag */ 691 /* Set the lpfc data pending flag */
692 set_bit(LPFC_DATA_READY, &phba->data_flags); 692 set_bit(LPFC_DATA_READY, &phba->data_flags);
693 } else { 693 } else {
694 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 694 if (phba->link_state >= LPFC_LINK_UP) {
695 lpfc_sli_handle_slow_ring_event(phba, pring, 695 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
696 (status & 696 lpfc_sli_handle_slow_ring_event(phba, pring,
697 HA_RXMASK)); 697 (status &
698 HA_RXMASK));
699 }
698 } 700 }
699 if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt) 701 if ((phba->sli_rev == LPFC_SLI_REV4) &&
702 (!list_empty(&pring->txq)))
700 lpfc_drain_txq(phba); 703 lpfc_drain_txq(phba);
701 /* 704 /*
702 * Turn on Ring interrupts 705 * Turn on Ring interrupts
@@ -1792,6 +1795,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1792 virt_addr = mboxq->sge_array->addr[0]; 1795 virt_addr = mboxq->sge_array->addr[0];
1793 1796
1794 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1797 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1798 lpfc_sli_pcimem_bcopy(shdr, shdr,
1799 sizeof(union lpfc_sli4_cfg_shdr));
1795 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1800 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1796 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 1801 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1797 if (shdr_status || shdr_add_status) { 1802 if (shdr_status || shdr_add_status) {
@@ -2888,6 +2893,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2888 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2893 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2889 goto out_free_mem; 2894 goto out_free_mem;
2890 } 2895 }
2896
2897 /* If the VFI is already registered, there is nothing else to do */
2898 if (vport->fc_flag & FC_VFI_REGISTERED)
2899 goto out_free_mem;
2900
2891 /* The VPI is implicitly registered when the VFI is registered */ 2901 /* The VPI is implicitly registered when the VFI is registered */
2892 spin_lock_irq(shost->host_lock); 2902 spin_lock_irq(shost->host_lock);
2893 vport->vpi_state |= LPFC_VPI_REGISTERED; 2903 vport->vpi_state |= LPFC_VPI_REGISTERED;
@@ -2980,6 +2990,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2980 struct lpfc_dmabuf *mp; 2990 struct lpfc_dmabuf *mp;
2981 int rc; 2991 int rc;
2982 struct fcf_record *fcf_record; 2992 struct fcf_record *fcf_record;
2993 uint32_t fc_flags = 0;
2983 2994
2984 spin_lock_irq(&phba->hbalock); 2995 spin_lock_irq(&phba->hbalock);
2985 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { 2996 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
@@ -3011,11 +3022,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3011 "1309 Link Up Event npiv not supported in loop " 3022 "1309 Link Up Event npiv not supported in loop "
3012 "topology\n"); 3023 "topology\n");
3013 /* Get Loop Map information */ 3024 /* Get Loop Map information */
3014 if (bf_get(lpfc_mbx_read_top_il, la)) { 3025 if (bf_get(lpfc_mbx_read_top_il, la))
3015 spin_lock(shost->host_lock); 3026 fc_flags |= FC_LBIT;
3016 vport->fc_flag |= FC_LBIT;
3017 spin_unlock(shost->host_lock);
3018 }
3019 3027
3020 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 3028 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3021 i = la->lilpBde64.tus.f.bdeSize; 3029 i = la->lilpBde64.tus.f.bdeSize;
@@ -3064,12 +3072,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3064 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3072 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3065 } 3073 }
3066 vport->fc_myDID = phba->fc_pref_DID; 3074 vport->fc_myDID = phba->fc_pref_DID;
3067 spin_lock(shost->host_lock); 3075 fc_flags |= FC_LBIT;
3068 vport->fc_flag |= FC_LBIT;
3069 spin_unlock(shost->host_lock);
3070 } 3076 }
3071 spin_unlock_irq(&phba->hbalock); 3077 spin_unlock_irq(&phba->hbalock);
3072 3078
3079 if (fc_flags) {
3080 spin_lock_irq(shost->host_lock);
3081 vport->fc_flag |= fc_flags;
3082 spin_unlock_irq(shost->host_lock);
3083 }
3084
3073 lpfc_linkup(phba); 3085 lpfc_linkup(phba);
3074 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3086 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3075 if (!sparam_mbox) 3087 if (!sparam_mbox)
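
The fc_flags accumulator above exists to fix lock nesting: the old code took shost->host_lock while phba->hbalock was already held; the new code records FC_LBIT locally under hbalock and publishes it to vport->fc_flag only after hbalock is dropped. A sketch of the pattern, assuming the usual lpfc headers (loop_map_valid is an illustrative stand-in for the bf_get() test):

static void set_link_flags_unnested(struct lpfc_hba *phba,
				    struct lpfc_vport *vport,
				    struct Scsi_Host *shost,
				    bool loop_map_valid)
{
	uint32_t fc_flags = 0;

	spin_lock_irq(&phba->hbalock);
	if (loop_map_valid)
		fc_flags |= FC_LBIT;	/* record, do not publish yet */
	spin_unlock_irq(&phba->hbalock);

	if (fc_flags) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= fc_flags;
		spin_unlock_irq(shost->host_lock);
	}
}
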
@@ -3237,8 +3249,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3237 vport->fc_flag &= ~FC_BYPASSED_MODE; 3249 vport->fc_flag &= ~FC_BYPASSED_MODE;
3238 spin_unlock_irq(shost->host_lock); 3250 spin_unlock_irq(shost->host_lock);
3239 3251
3240 if ((phba->fc_eventTag < la->eventTag) || 3252 if (phba->fc_eventTag <= la->eventTag) {
3241 (phba->fc_eventTag == la->eventTag)) {
3242 phba->fc_stat.LinkMultiEvent++; 3253 phba->fc_stat.LinkMultiEvent++;
3243 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) 3254 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
3244 if (phba->fc_eventTag != 0) 3255 if (phba->fc_eventTag != 0)
@@ -3246,16 +3257,18 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3246 } 3257 }
3247 3258
3248 phba->fc_eventTag = la->eventTag; 3259 phba->fc_eventTag = la->eventTag;
3249 spin_lock_irq(&phba->hbalock); 3260 if (phba->sli_rev < LPFC_SLI_REV4) {
3250 if (bf_get(lpfc_mbx_read_top_mm, la)) 3261 spin_lock_irq(&phba->hbalock);
3251 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 3262 if (bf_get(lpfc_mbx_read_top_mm, la))
3252 else 3263 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3253 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 3264 else
3254 spin_unlock_irq(&phba->hbalock); 3265 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3266 spin_unlock_irq(&phba->hbalock);
3267 }
3255 3268
3256 phba->link_events++; 3269 phba->link_events++;
3257 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && 3270 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
3258 (!bf_get(lpfc_mbx_read_top_mm, la))) { 3271 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3259 phba->fc_stat.LinkUp++; 3272 phba->fc_stat.LinkUp++;
3260 if (phba->link_flag & LS_LOOPBACK_MODE) { 3273 if (phba->link_flag & LS_LOOPBACK_MODE) {
3261 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3274 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3300,8 +3313,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3300 bf_get(lpfc_mbx_read_top_fa, la)); 3313 bf_get(lpfc_mbx_read_top_fa, la));
3301 lpfc_mbx_issue_link_down(phba); 3314 lpfc_mbx_issue_link_down(phba);
3302 } 3315 }
3303 if ((bf_get(lpfc_mbx_read_top_mm, la)) && 3316 if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
3304 (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) { 3317 ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
3305 if (phba->link_state != LPFC_LINK_DOWN) { 3318 if (phba->link_state != LPFC_LINK_DOWN) {
3306 phba->fc_stat.LinkDown++; 3319 phba->fc_stat.LinkDown++;
3307 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3320 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3329,8 +3342,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3329 } 3342 }
3330 } 3343 }
3331 3344
3332 if (bf_get(lpfc_mbx_read_top_fa, la)) { 3345 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3333 if (bf_get(lpfc_mbx_read_top_mm, la)) 3346 bf_get(lpfc_mbx_read_top_fa, la)) {
3347 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3334 lpfc_issue_clear_la(phba, vport); 3348 lpfc_issue_clear_la(phba, vport);
3335 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3349 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3336 "1311 fa %d\n", 3350 "1311 fa %d\n",
@@ -4354,7 +4368,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4354 with an error */ 4368 with an error */
4355 list_move_tail(&iocb->list, 4369 list_move_tail(&iocb->list,
4356 &completions); 4370 &completions);
4357 pring->txq_cnt--;
4358 } 4371 }
4359 } 4372 }
4360 spin_unlock_irq(&phba->hbalock); 4373 spin_unlock_irq(&phba->hbalock);
@@ -5055,7 +5068,6 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5055 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 5068 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5056 5069
5057 list_move_tail(&iocb->list, &completions); 5070 list_move_tail(&iocb->list, &completions);
5058 pring->txq_cnt--;
5059 } 5071 }
5060 } 5072 }
5061 5073
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6e93b886cd4d..1dd2f6f0a127 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1958,6 +1958,9 @@ struct lpfc_mbx_init_vfi {
1958 1958
1959struct lpfc_mbx_reg_vfi { 1959struct lpfc_mbx_reg_vfi {
1960 uint32_t word1; 1960 uint32_t word1;
1961#define lpfc_reg_vfi_upd_SHIFT 29
1962#define lpfc_reg_vfi_upd_MASK 0x00000001
1963#define lpfc_reg_vfi_upd_WORD word1
1961#define lpfc_reg_vfi_vp_SHIFT 28 1964#define lpfc_reg_vfi_vp_SHIFT 28
1962#define lpfc_reg_vfi_vp_MASK 0x00000001 1965#define lpfc_reg_vfi_vp_MASK 0x00000001
1963#define lpfc_reg_vfi_vp_WORD word1 1966#define lpfc_reg_vfi_vp_WORD word1
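
The new lpfc_reg_vfi_upd_* triplet follows the driver's bitfield convention: a SHIFT, an unshifted MASK of the field width, and the WORD the field lives in, consumed by the bf_set()/bf_get() macros via token pasting. A simplified paraphrase of how such macros use the triplet (not a copy of the driver's own definitions):

#define sketch_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

#define sketch_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
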
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 314b4f61b9e3..5da297290262 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -839,7 +839,6 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
839 * way, nothing should be on txcmplq as it will NEVER complete. 839 * way, nothing should be on txcmplq as it will NEVER complete.
840 */ 840 */
841 list_splice_init(&pring->txcmplq, &completions); 841 list_splice_init(&pring->txcmplq, &completions);
842 pring->txcmplq_cnt = 0;
843 spin_unlock_irq(&phba->hbalock); 842 spin_unlock_irq(&phba->hbalock);
844 843
845 /* Cancel all the IOCBs from the completions list */ 844 /* Cancel all the IOCBs from the completions list */
@@ -2915,9 +2914,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
2915 sglq_entry->state = SGL_FREED; 2914 sglq_entry->state = SGL_FREED;
2916 list_add_tail(&sglq_entry->list, &els_sgl_list); 2915 list_add_tail(&sglq_entry->list, &els_sgl_list);
2917 } 2916 }
2918 spin_lock(&phba->hbalock); 2917 spin_lock_irq(&phba->hbalock);
2919 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 2918 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
2920 spin_unlock(&phba->hbalock); 2919 spin_unlock_irq(&phba->hbalock);
2921 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 2920 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
2922 /* els xri-sgl shrinked */ 2921 /* els xri-sgl shrinked */
2923 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 2922 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3015,9 +3014,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3015 psb->cur_iocbq.sli4_lxritag = lxri; 3014 psb->cur_iocbq.sli4_lxritag = lxri;
3016 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3015 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3017 } 3016 }
3018 spin_lock(&phba->scsi_buf_list_lock); 3017 spin_lock_irq(&phba->scsi_buf_list_lock);
3019 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); 3018 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
3020 spin_unlock(&phba->scsi_buf_list_lock); 3019 spin_unlock_irq(&phba->scsi_buf_list_lock);
3021 3020
3022 return 0; 3021 return 0;
3023 3022
@@ -4004,6 +4003,52 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
4004} 4003}
4005 4004
4006/** 4005/**
4006 * lpfc_sli4_perform_inuse_fcf_recovery - Perform inuse fcf recovery
4007 * @phba: pointer to lpfc hba data structure.
4008 * @acqe_fip: pointer to the async FIP completion queue entry.
4009 *
4010 * This routine performs FCF recovery when the in-use FCF is dead or modified.
4011 **/
4012static void
4013lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
4014 struct lpfc_acqe_fip *acqe_fip)
4015{
4016 int rc;
4017
4018 spin_lock_irq(&phba->hbalock);
4019 /* Mark the fast failover process in progress */
4020 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4021 spin_unlock_irq(&phba->hbalock);
4022
4023 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4024 "2771 Start FCF fast failover process due to in-use "
4025 "FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
4026 acqe_fip->event_tag, acqe_fip->index);
4027 rc = lpfc_sli4_redisc_fcf_table(phba);
4028 if (rc) {
4029 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
4030 "2772 Issue FCF rediscover mabilbox command "
4031 "failed, fail through to FCF dead event\n");
4032 spin_lock_irq(&phba->hbalock);
4033 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4034 spin_unlock_irq(&phba->hbalock);
4035 /*
4036 * As a last resort, fail over by treating this as a link
4037 * down to FCF registration.
4038 */
4039 lpfc_sli4_fcf_dead_failthrough(phba);
4040 } else {
4041 /* Reset FCF roundrobin bmask for new discovery */
4042 lpfc_sli4_clear_fcf_rr_bmask(phba);
4043 /*
4044 * Handling fast FCF failover to a DEAD FCF event is
4045 * considered equivalent to receiving CVL on all vports.
4046 */
4047 lpfc_sli4_perform_all_vport_cvl(phba);
4048 }
4049}
4050
4051/**
4007 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 4052 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
4008 * @phba: pointer to lpfc hba data structure. 4053 * @phba: pointer to lpfc hba data structure.
4009 * @acqe_link: pointer to the async fcoe completion queue entry. 4054 * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -4068,9 +4113,22 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4068 break; 4113 break;
4069 } 4114 }
4070 4115
4071 /* If the FCF has been in discovered state, do nothing. */ 4116 /* If FCF has been in discovered state, perform rediscovery
4072 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 4117 * only if the FCF with the same index as the in-use FCF was
4118 * modified during normal operation. Otherwise, do nothing.
4119 */
4120 if (phba->pport->port_state > LPFC_FLOGI) {
4073 spin_unlock_irq(&phba->hbalock); 4121 spin_unlock_irq(&phba->hbalock);
4122 if (phba->fcf.current_rec.fcf_indx ==
4123 acqe_fip->index) {
4124 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
4125 "3300 In-use FCF (%d) "
4126 "modified, perform FCF "
4127 "rediscovery\n",
4128 acqe_fip->index);
4129 lpfc_sli4_perform_inuse_fcf_recovery(phba,
4130 acqe_fip);
4131 }
4074 break; 4132 break;
4075 } 4133 }
4076 spin_unlock_irq(&phba->hbalock); 4134 spin_unlock_irq(&phba->hbalock);
@@ -4123,39 +4181,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
4123 * is no longer valid as we are not already in the middle of an 4181 * is no longer valid as we are not already in the middle of an
4124 * FCF failover process. 4182 * FCF failover process.
4125 */ 4183 */
4126 spin_lock_irq(&phba->hbalock); 4184 lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
4127 /* Mark the fast failover process in progress */
4128 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
4129 spin_unlock_irq(&phba->hbalock);
4130
4131 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
4132 "2771 Start FCF fast failover process due to "
4133 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
4134 "\n", acqe_fip->event_tag, acqe_fip->index);
4135 rc = lpfc_sli4_redisc_fcf_table(phba);
4136 if (rc) {
4137 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
4138 LOG_DISCOVERY,
4139 "2772 Issue FCF rediscover mabilbox "
4140 "command failed, fail through to FCF "
4141 "dead event\n");
4142 spin_lock_irq(&phba->hbalock);
4143 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
4144 spin_unlock_irq(&phba->hbalock);
4145 /*
4146 * Last resort will fail over by treating this
4147 * as a link down to FCF registration.
4148 */
4149 lpfc_sli4_fcf_dead_failthrough(phba);
4150 } else {
4151 /* Reset FCF roundrobin bmask for new discovery */
4152 lpfc_sli4_clear_fcf_rr_bmask(phba);
4153 /*
4154 * Handling fast FCF failover to a DEAD FCF event is
4155 * considered equalivant to receiving CVL to all vports.
4156 */
4157 lpfc_sli4_perform_all_vport_cvl(phba);
4158 }
4159 break; 4185 break;
4160 case LPFC_FIP_EVENT_TYPE_CVL: 4186 case LPFC_FIP_EVENT_TYPE_CVL:
4161 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4187 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index efc9cd9def8b..a7a9fa468308 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2126,32 +2126,40 @@ void
2126lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) 2126lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2127{ 2127{
2128 struct lpfc_mbx_reg_vfi *reg_vfi; 2128 struct lpfc_mbx_reg_vfi *reg_vfi;
2129 struct lpfc_hba *phba = vport->phba;
2129 2130
2130 memset(mbox, 0, sizeof(*mbox)); 2131 memset(mbox, 0, sizeof(*mbox));
2131 reg_vfi = &mbox->u.mqe.un.reg_vfi; 2132 reg_vfi = &mbox->u.mqe.un.reg_vfi;
2132 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); 2133 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
2133 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); 2134 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
2134 bf_set(lpfc_reg_vfi_vfi, reg_vfi, 2135 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2135 vport->phba->sli4_hba.vfi_ids[vport->vfi]); 2136 phba->sli4_hba.vfi_ids[vport->vfi]);
2136 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); 2137 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
2137 bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]); 2138 bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
2138 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); 2139 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
2139 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); 2140 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
2140 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); 2141 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
2141 reg_vfi->e_d_tov = vport->phba->fc_edtov; 2142 reg_vfi->e_d_tov = phba->fc_edtov;
2142 reg_vfi->r_a_tov = vport->phba->fc_ratov; 2143 reg_vfi->r_a_tov = phba->fc_ratov;
2143 reg_vfi->bde.addrHigh = putPaddrHigh(phys); 2144 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
2144 reg_vfi->bde.addrLow = putPaddrLow(phys); 2145 reg_vfi->bde.addrLow = putPaddrLow(phys);
2145 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); 2146 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
2146 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2147 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2147 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); 2148 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
2149
2150 /* Only FC supports upd bit */
2151 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
2152 (vport->fc_flag & FC_VFI_REGISTERED)) {
2153 bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
2154 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
2155 }
2148 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, 2156 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
2149 "3134 Register VFI, mydid:x%x, fcfi:%d, " 2157 "3134 Register VFI, mydid:x%x, fcfi:%d, "
2150 " vfi:%d, vpi:%d, fc_pname:%x%x\n", 2158 " vfi:%d, vpi:%d, fc_pname:%x%x\n",
2151 vport->fc_myDID, 2159 vport->fc_myDID,
2152 vport->phba->fcf.fcfi, 2160 phba->fcf.fcfi,
2153 vport->phba->sli4_hba.vfi_ids[vport->vfi], 2161 phba->sli4_hba.vfi_ids[vport->vfi],
2154 vport->phba->vpi_ids[vport->vpi], 2162 phba->vpi_ids[vport->vpi],
2155 reg_vfi->wwn[0], reg_vfi->wwn[1]); 2163 reg_vfi->wwn[0], reg_vfi->wwn[1]);
2156} 2164}
2157 2165
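
Together with the lpfc_els.c hunk earlier, the caller-side flow is: on a physical FC link whose fabric parameters changed, an already-registered VFI is re-registered with vp cleared and upd set, so the port refreshes only the Common Service Parameters. A hypothetical sketch of that flow (the helper name is illustrative; the real logic sits in lpfc_cmpl_els_flogi_fabric()):

static void refresh_vfi_csps(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/* the upd bit is only meaningful on a physical FC link */
	if (phba->sli4_hba.lnk_info.lnk_tp != LPFC_LNK_TYPE_FC)
		return;

	/* lpfc_reg_vfi() sets upd=1/vp=0 for an already-registered VFI */
	if (vport->fc_flag & FC_VFI_REGISTERED)
		lpfc_issue_reg_vfi(vport);
}
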
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 46128c679202..82f4d3542289 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -226,7 +226,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
226 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 226 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
227 /* It matches, so dequeue and call compl with an error */ 227 /* It matches, so dequeue and call compl with an error */
228 list_move_tail(&iocb->list, &completions); 228 list_move_tail(&iocb->list, &completions);
229 pring->txq_cnt--;
230 } 229 }
231 } 230 }
232 231
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 98af07c6e300..74b8710e1e90 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -732,7 +732,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
732 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 732 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
733 psb->exch_busy = 0; 733 psb->exch_busy = 0;
734 spin_unlock_irqrestore(&phba->hbalock, iflag); 734 spin_unlock_irqrestore(&phba->hbalock, iflag);
735 if (pring->txq_cnt) 735 if (!list_empty(&pring->txq))
736 lpfc_worker_wake_up(phba); 736 lpfc_worker_wake_up(phba);
737 return; 737 return;
738 738
@@ -885,9 +885,9 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
885 int num_posted, rc = 0; 885 int num_posted, rc = 0;
886 886
887 /* get all SCSI buffers that need reposting onto a local list */ 887
888 spin_lock(&phba->scsi_buf_list_lock); 888 spin_lock_irq(&phba->scsi_buf_list_lock);
889 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); 889 list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
890 spin_unlock(&phba->scsi_buf_list_lock); 890 spin_unlock_irq(&phba->scsi_buf_list_lock);
891 891
892 /* post the list of scsi buffer sgls to port if available */ 892 /* post the list of scsi buffer sgls to port if available */
893 if (!list_empty(&post_sblist)) { 893 if (!list_empty(&post_sblist)) {
@@ -4246,7 +4246,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4246 unsigned long poll_tmo_expires = 4246 unsigned long poll_tmo_expires =
4247 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 4247 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4248 4248
4249 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt) 4249 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
4250 mod_timer(&phba->fcp_poll_timer, 4250 mod_timer(&phba->fcp_poll_timer,
4251 poll_tmo_expires); 4251 poll_tmo_expires);
4252} 4252}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d43faf34c1e2..35dd17eb0f27 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -873,14 +873,16 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
873 xritag, rxid, ndlp->nlp_DID, send_rrq); 873 xritag, rxid, ndlp->nlp_DID, send_rrq);
874 return -EINVAL; 874 return -EINVAL;
875 } 875 }
876 rrq->send_rrq = send_rrq; 876 if (phba->cfg_enable_rrq == 1)
877 rrq->send_rrq = send_rrq;
878 else
879 rrq->send_rrq = 0;
877 rrq->xritag = xritag; 880 rrq->xritag = xritag;
878 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); 881 rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
879 rrq->ndlp = ndlp; 882 rrq->ndlp = ndlp;
880 rrq->nlp_DID = ndlp->nlp_DID; 883 rrq->nlp_DID = ndlp->nlp_DID;
881 rrq->vport = ndlp->vport; 884 rrq->vport = ndlp->vport;
882 rrq->rxid = rxid; 885 rrq->rxid = rxid;
883 rrq->send_rrq = send_rrq;
884 spin_lock_irqsave(&phba->hbalock, iflags); 886 spin_lock_irqsave(&phba->hbalock, iflags);
885 empty = list_empty(&phba->active_rrq_list); 887 empty = list_empty(&phba->active_rrq_list);
886 list_add_tail(&rrq->list, &phba->active_rrq_list); 888 list_add_tail(&rrq->list, &phba->active_rrq_list);
@@ -1009,6 +1011,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1009 else 1011 else
1010 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1012 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1011 1013
1014 /*
1015 * This should have been removed from the txcmplq before calling
1016 * iocbq_release. The normal completion path should have already
1017 * done the list_del_init.
1018 */
1019 if (unlikely(!list_empty(&iocbq->list))) {
1020 if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
1021 iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
1022 list_del_init(&iocbq->list);
1023 }
1024
1025
1012 if (sglq) { 1026 if (sglq) {
1013 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1027 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1014 (sglq->state != SGL_XRI_ABORTED)) { 1028 (sglq->state != SGL_XRI_ABORTED)) {
@@ -1025,7 +1039,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1025 &phba->sli4_hba.lpfc_sgl_list); 1039 &phba->sli4_hba.lpfc_sgl_list);
1026 1040
1027 /* Check if TXQ queue needs to be serviced */ 1041 /* Check if TXQ queue needs to be serviced */
1028 if (pring->txq_cnt) 1042 if (!list_empty(&pring->txq))
1029 lpfc_worker_wake_up(phba); 1043 lpfc_worker_wake_up(phba);
1030 } 1044 }
1031 } 1045 }
@@ -1057,6 +1071,14 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1057 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1071 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1058 1072
1059 /* 1073 /*
1074 * This should have been removed from the txcmplq before calling
1075 * iocbq_release. The normal completion path should have already
1076 * done the list_del_init.
1077 */
1078 if (unlikely(!list_empty(&iocbq->list)))
1079 list_del_init(&iocbq->list);
1080
1081 /*
1060 * Clean all volatile data fields, preserve iotag and node struct. 1082 * Clean all volatile data fields, preserve iotag and node struct.
1061 */ 1083 */
1062 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1084 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
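
The last-chance unlink added to both release paths works only because the driver consistently removes iocbs with list_del_init(), which points the entry back at itself; list_empty(&iocbq->list) is then true exactly when the iocb is off every queue, whereas a plain list_del() would leave poison pointers and defeat the test. A self-contained illustration (struct iocb_example is a stand-in for struct lpfc_iocbq):

#include <linux/compiler.h>
#include <linux/list.h>

struct iocb_example {			/* stand-in for struct lpfc_iocbq */
	struct list_head list;
};

static void release_unlink_check(struct iocb_example *iocbq)
{
	/* safe only because every remover used list_del_init() */
	if (unlikely(!list_empty(&iocbq->list)))
		list_del_init(&iocbq->list);
}
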
@@ -1122,7 +1144,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1122 1144
1123 while (!list_empty(iocblist)) { 1145 while (!list_empty(iocblist)) {
1124 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1146 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1125
1126 if (!piocb->iocb_cmpl) 1147 if (!piocb->iocb_cmpl)
1127 lpfc_sli_release_iocbq(phba, piocb); 1148 lpfc_sli_release_iocbq(phba, piocb);
1128 else { 1149 else {
@@ -1310,9 +1331,6 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1310{ 1331{
1311 list_add_tail(&piocb->list, &pring->txcmplq); 1332 list_add_tail(&piocb->list, &pring->txcmplq);
1312 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1333 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1313 pring->txcmplq_cnt++;
1314 if (pring->txcmplq_cnt > pring->txcmplq_max)
1315 pring->txcmplq_max = pring->txcmplq_cnt;
1316 1334
1317 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1335 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1318 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1336 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -1344,8 +1362,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1344 struct lpfc_iocbq *cmd_iocb; 1362 struct lpfc_iocbq *cmd_iocb;
1345 1363
1346 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1364 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1347 if (cmd_iocb != NULL)
1348 pring->txq_cnt--;
1349 return cmd_iocb; 1365 return cmd_iocb;
1350} 1366}
1351 1367
@@ -1614,8 +1630,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1614 * (c) link attention events can be processed (fcp ring only) 1630 * (c) link attention events can be processed (fcp ring only)
1615 * (d) IOCB processing is not blocked by the outstanding mbox command. 1631 * (d) IOCB processing is not blocked by the outstanding mbox command.
1616 */ 1632 */
1617 if (pring->txq_cnt && 1633
1618 lpfc_is_link_up(phba) && 1634 if (lpfc_is_link_up(phba) &&
1635 (!list_empty(&pring->txq)) &&
1619 (pring->ringno != phba->sli.fcp_ring || 1636 (pring->ringno != phba->sli.fcp_ring ||
1620 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1637 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1621 1638
@@ -2612,7 +2629,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2612 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2629 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2613 list_del_init(&cmd_iocb->list); 2630 list_del_init(&cmd_iocb->list);
2614 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2631 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2615 pring->txcmplq_cnt--;
2616 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2632 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2617 } 2633 }
2618 return cmd_iocb; 2634 return cmd_iocb;
@@ -2650,7 +2666,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2650 /* remove from txcmpl queue list */ 2666 /* remove from txcmpl queue list */
2651 list_del_init(&cmd_iocb->list); 2667 list_del_init(&cmd_iocb->list);
2652 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2668 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2653 pring->txcmplq_cnt--;
2654 return cmd_iocb; 2669 return cmd_iocb;
2655 } 2670 }
2656 } 2671 }
@@ -3499,7 +3514,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3499 */ 3514 */
3500 spin_lock_irq(&phba->hbalock); 3515 spin_lock_irq(&phba->hbalock);
3501 list_splice_init(&pring->txq, &completions); 3516 list_splice_init(&pring->txq, &completions);
3502 pring->txq_cnt = 0;
3503 3517
3504 /* Next issue ABTS for everything on the txcmplq */ 3518 /* Next issue ABTS for everything on the txcmplq */
3505 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3519 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
@@ -3536,11 +3550,9 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3536 spin_lock_irq(&phba->hbalock); 3550 spin_lock_irq(&phba->hbalock);
3537 /* Retrieve everything on txq */ 3551 /* Retrieve everything on txq */
3538 list_splice_init(&pring->txq, &txq); 3552 list_splice_init(&pring->txq, &txq);
3539 pring->txq_cnt = 0;
3540 3553
3541 /* Retrieve everything on the txcmplq */ 3554 /* Retrieve everything on the txcmplq */
3542 list_splice_init(&pring->txcmplq, &txcmplq); 3555 list_splice_init(&pring->txcmplq, &txcmplq);
3543 pring->txcmplq_cnt = 0;
3544 3556
3545 /* Indicate the I/O queues are flushed */ 3557 /* Indicate the I/O queues are flushed */
3546 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3558 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
@@ -5988,9 +6000,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5988 LIST_HEAD(post_sgl_list); 6000 LIST_HEAD(post_sgl_list);
5989 LIST_HEAD(free_sgl_list); 6001 LIST_HEAD(free_sgl_list);
5990 6002
5991 spin_lock(&phba->hbalock); 6003 spin_lock_irq(&phba->hbalock);
5992 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6004 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
5993 spin_unlock(&phba->hbalock); 6005 spin_unlock_irq(&phba->hbalock);
5994 6006
5995 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6007 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
5996 &allc_sgl_list, list) { 6008 &allc_sgl_list, list) {
@@ -6091,10 +6103,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6091 6103
6092 /* push els sgls posted to the available list */ 6104 /* push els sgls posted to the available list */
6093 if (!list_empty(&post_sgl_list)) { 6105 if (!list_empty(&post_sgl_list)) {
6094 spin_lock(&phba->hbalock); 6106 spin_lock_irq(&phba->hbalock);
6095 list_splice_init(&post_sgl_list, 6107 list_splice_init(&post_sgl_list,
6096 &phba->sli4_hba.lpfc_sgl_list); 6108 &phba->sli4_hba.lpfc_sgl_list);
6097 spin_unlock(&phba->hbalock); 6109 spin_unlock_irq(&phba->hbalock);
6098 } else { 6110 } else {
6099 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6111 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6100 "3161 Failure to post els sgl to port.\n"); 6112 "3161 Failure to post els sgl to port.\n");
@@ -7615,7 +7627,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7615{ 7627{
7616 /* Insert the caller's iocb in the txq tail for later processing. */ 7628 /* Insert the caller's iocb in the txq tail for later processing. */
7617 list_add_tail(&piocb->list, &pring->txq); 7629 list_add_tail(&piocb->list, &pring->txq);
7618 pring->txq_cnt++;
7619} 7630}
7620 7631
7621/** 7632/**
@@ -8387,7 +8398,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8387 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8398 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8388 sglq = NULL; 8399 sglq = NULL;
8389 else { 8400 else {
8390 if (pring->txq_cnt) { 8401 if (!list_empty(&pring->txq)) {
8391 if (!(flag & SLI_IOCB_RET_IOCB)) { 8402 if (!(flag & SLI_IOCB_RET_IOCB)) {
8392 __lpfc_sli_ringtx_put(phba, 8403 __lpfc_sli_ringtx_put(phba,
8393 pring, piocb); 8404 pring, piocb);
@@ -9055,7 +9066,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
9055 if (iocb->vport != vport) 9066 if (iocb->vport != vport)
9056 continue; 9067 continue;
9057 list_move_tail(&iocb->list, &completions); 9068 list_move_tail(&iocb->list, &completions);
9058 pring->txq_cnt--;
9059 } 9069 }
9060 9070
9061 /* Next issue ABTS for everything on the txcmplq */ 9071 /* Next issue ABTS for everything on the txcmplq */
@@ -9124,8 +9134,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 		 * given to the FW yet.
 		 */
 		list_splice_init(&pring->txq, &completions);
-		pring->txq_cnt = 0;
-
 	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
@@ -9966,6 +9974,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 	long timeleft, timeout_req = 0;
 	int retval = IOCB_SUCCESS;
 	uint32_t creg_val;
+	struct lpfc_iocbq *iocb;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	/*
 	 * If the caller has provided a response iocbq buffer, then context2
@@ -10013,9 +10024,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 			retval = IOCB_TIMEDOUT;
 		}
 	} else if (retval == IOCB_BUSY) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
-			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+		if (phba->cfg_log_verbose & LOG_SLI) {
+			list_for_each_entry(iocb, &pring->txq, list) {
+				txq_cnt++;
+			}
+			list_for_each_entry(iocb, &pring->txcmplq, list) {
+				txcmplq_cnt++;
+			}
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+		}
 		return retval;
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -11298,16 +11317,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	struct lpfc_iocbq *irspiocbq;
 	unsigned long iflags;
 	struct lpfc_sli_ring *pring = cq->pring;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
+	int fcp_txcmplq_cnt = 0;
 
 	/* Get an irspiocbq for later ELS response processing use */
 	irspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!irspiocbq) {
+		if (!list_empty(&pring->txq))
+			txq_cnt++;
+		if (!list_empty(&pring->txcmplq))
+			txcmplq_cnt++;
+		if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+			fcp_txcmplq_cnt++;
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
 			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
-			pring->txq_cnt, phba->iocb_cnt,
-			phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
-			phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+			txq_cnt, phba->iocb_cnt,
+			fcp_txcmplq_cnt,
+			txcmplq_cnt);
 		return false;
 	}
 
@@ -15482,11 +15510,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
 				    LPFC_SLI4_FCF_TBL_INDX_MAX);
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 			"3060 Last IDX %d\n", last_index);
-	if (list_empty(&phba->fcf.fcf_pri_list)) {
+
+	/* Verify the priority list has 2 or more entries */
+	spin_lock_irq(&phba->hbalock);
+	if (list_empty(&phba->fcf.fcf_pri_list) ||
+	    list_is_singular(&phba->fcf.fcf_pri_list)) {
+		spin_unlock_irq(&phba->hbalock);
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 			"3061 Last IDX %d\n", last_index);
 		return 0; /* Empty rr list */
 	}
+	spin_unlock_irq(&phba->hbalock);
+
 	next_fcf_pri = 0;
 	/*
 	 * Clear the rr_bmask and set all of the bits that are at this
@@ -16245,14 +16280,19 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	char *fail_msg = NULL;
 	struct lpfc_sglq *sglq;
 	union lpfc_wqe wqe;
+	int txq_cnt = 0;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	if (pring->txq_cnt > pring->txq_max)
-		pring->txq_max = pring->txq_cnt;
+	list_for_each_entry(piocbq, &pring->txq, list) {
+		txq_cnt++;
+	}
+
+	if (txq_cnt > pring->txq_max)
+		pring->txq_max = txq_cnt;
 
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
-	while (pring->txq_cnt) {
+	while (!list_empty(&pring->txq)) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
 
 		piocbq = lpfc_sli_ringtx_get(phba, pring);
@@ -16260,7 +16300,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2823 txq empty and txq_cnt is %d\n ",
-				pring->txq_cnt);
+				txq_cnt);
 			break;
 		}
 		sglq = __lpfc_sli_get_sglq(phba, piocbq);
@@ -16269,6 +16309,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			break;
 		}
+		txq_cnt--;
 
 		/* The xri and iocb resources secured,
 		 * attempt to issue request
@@ -16300,5 +16341,5 @@ lpfc_drain_txq(struct lpfc_hba *phba)
 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 			      IOERR_SLI_ABORTED);
 
-	return pring->txq_cnt;
+	return txq_cnt;
 }
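
The lpfc_sli.c hunks above share one theme: the driver stops maintaining the cached txq_cnt/txcmplq_cnt fields and derives the state from the lists themselves — list_empty() for the hot-path "is there work" test, and an explicit walk under the lock on the rare diagnostic paths that need an actual count. Below is a minimal sketch of that pattern, assuming only <linux/list.h> and <linux/spinlock.h>; the demo_* names are illustrative stand-ins, not the driver's real layout.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_ring {
	spinlock_t lock;		/* protects txq */
	struct list_head txq;		/* pending iocbs */
};

struct demo_iocb {
	struct list_head list;
};

/* O(1) hot-path test: no cached counter to keep in sync */
static bool demo_txq_pending(struct demo_ring *ring)
{
	return !list_empty(&ring->txq);
}

/* O(n) count, computed only when a diagnostic path needs the number */
static int demo_txq_count(struct demo_ring *ring)
{
	struct demo_iocb *iocb;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&ring->lock, flags);
	list_for_each_entry(iocb, &ring->txq, list)
		cnt++;
	spin_unlock_irqrestore(&ring->lock, flags);

	return cnt;
}

The trade is an O(n) walk in place of counter bookkeeping on every queue and dequeue; that is a net simplification here because the count is only consulted on error and logging paths, while the hot paths never need more than the emptiness test.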
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f3b7795a296b..664cd04f7cd8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.37"
+#define LPFC_DRIVER_VERSION "8.3.38"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
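
The lpfc_check_next_fcf_pri_level() hunk earlier applies the same idea to the FCF round-robin priority list, but additionally takes phba->hbalock around the test and rejects a single-entry list with list_is_singular(), since failover needs at least two candidates. A sketch of that check in isolation follows; again, the demo_* names are illustrative assumptions, not the driver's actual structures.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_fcf {
	spinlock_t lock;		/* protects pri_list */
	struct list_head pri_list;	/* round-robin FCF priority list */
};

/*
 * True only when the list holds two or more entries; an empty or
 * singular list offers no alternate FCF to fail over to.
 */
static bool demo_has_failover_candidate(struct demo_fcf *fcf)
{
	bool ret;

	spin_lock_irq(&fcf->lock);
	ret = !list_empty(&fcf->pri_list) &&
	      !list_is_singular(&fcf->pri_list);
	spin_unlock_irq(&fcf->lock);

	return ret;
}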