author		James Smart <james.smart@emulex.com>	2010-01-26 23:09:48 -0500
committer	James Bottomley <James.Bottomley@suse.de>	2010-02-08 19:40:33 -0500
commit		4cc0e56e977f12e6f400cbab3df7cf1e11d6f58a (patch)
tree		7a7826363fd2b7630965fd845b23ac35feaa847f /drivers/scsi/lpfc
parent		c79c1292df87fa9c63383ca551fa719c0c2fda7c (diff)
[SCSI] lpfc 8.3.8: (BSG3) Modify BSG commands to operate asynchronously
Modify the following BSG commands to operate asynchronously:

 - FC_BSG_RPT_ELS
 - FC_BSG_RPT_CT
 - LPFC_BSG_VENDOR_GET_CT_EVENT
 - LPFC_BSG_VENDOR_SET_CT_EVENT

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
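Every conversion in this patch follows the same shape: the blocking lpfc_sli_issue_iocb_wait() call goes away in favor of lpfc_sli_issue_iocb() plus an iocb_cmpl completion handler, and a small tracking structure (struct bsg_job_data) is hung off the iocb's context1 and the job's dd_data so the completion and timeout paths can find each other and reply exactly once. The user-space C sketch below models only that handshake; every identifier in it (struct job, struct tracking_data, issue_cmd, and so on) is an illustrative stand-in, not an lpfc or scsi_transport_fc symbol.

	/*
	 * Hedged sketch, not driver code: models the issue/complete/timeout
	 * handshake this patch introduces.  All names are stand-ins.
	 */
	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct tracking_data;                   /* plays struct bsg_job_data */

	struct job {                            /* plays struct fc_bsg_job   */
		struct tracking_data *dd_data;  /* NULL once the job is done */
		int result;
	};

	struct tracking_data {
		struct job *set_job;            /* job waiting on completion */
	};

	static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ ct_ev_lock */

	/* Submit path: attach tracking state and return immediately. */
	static struct tracking_data *issue_cmd(struct job *job)
	{
		struct tracking_data *dd = malloc(sizeof(*dd));

		if (!dd)
			return NULL;
		dd->set_job = job;
		job->dd_data = dd;      /* lets the timeout path find the command */
		return dd;              /* hardware would complete it later       */
	}

	/* Timeout path: may race the completion path; never frees anything. */
	static void timeout_handler(struct job *job)
	{
		pthread_mutex_lock(&ev_lock);
		if (!job->dd_data) {            /* completion already replied */
			pthread_mutex_unlock(&ev_lock);
			return;
		}
		job->result = -EAGAIN;          /* hint: tell the app to retry */
		pthread_mutex_unlock(&ev_lock);
		/* the driver would now abort the iotag, forcing completion */
	}

	/* Completion path: the only place that finishes the job and frees dd. */
	static void completion_handler(struct tracking_data *dd)
	{
		struct job *job = dd->set_job;

		pthread_mutex_lock(&ev_lock);
		if (!job->dd_data) {            /* crossed paths, already done */
			pthread_mutex_unlock(&ev_lock);
			return;
		}
		job->dd_data = NULL;            /* so the timeout will not reply */
		if (job->result != -EAGAIN)     /* keep the timeout's hint       */
			job->result = 0;
		free(dd);
		printf("job completed, result=%d\n", job->result);
		pthread_mutex_unlock(&ev_lock);
	}

	int main(void)
	{
		struct job j = { NULL, 0 };
		struct tracking_data *dd = issue_cmd(&j);

		if (!dd)
			return 1;
		timeout_handler(&j);    /* fires first in this run            */
		completion_handler(dd); /* still the only path that completes */
		return 0;
	}

The invariant mirrored in the handlers below is that only one path completes the job: the completion handler clears job->dd_data under the lock, and whichever side finds it already NULL knows the other got there first and backs off.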
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--	drivers/scsi/lpfc/lpfc_bsg.c | 863
1 file changed, 592 insertions(+), 271 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index dfb1f73252a1..a7e8921015eb 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -42,14 +42,169 @@
 #include "lpfc_vport.h"
 #include "lpfc_version.h"
 
+struct lpfc_bsg_event {
+	struct list_head node;
+	struct kref kref;
+	wait_queue_head_t wq;
+
+	/* Event type and waiter identifiers */
+	uint32_t type_mask;
+	uint32_t req_id;
+	uint32_t reg_id;
+
+	/* next two flags are here for the auto-delete logic */
+	unsigned long wait_time_stamp;
+	int waiting;
+
+	/* seen and not seen events */
+	struct list_head events_to_get;
+	struct list_head events_to_see;
+
+	/* job waiting for this event to finish */
+	struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_iocb {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
+#define TYPE_EVT 1
+#define TYPE_IOCB 2
+struct bsg_job_data {
+	uint32_t type;
+	union {
+		struct lpfc_bsg_event *evt;
+		struct lpfc_bsg_iocb iocb;
+	} context_un;
+};
+
+struct event_data {
+	struct list_head node;
+	uint32_t type;
+	uint32_t immed_dat;
+	void *data;
+	uint32_t len;
+};
+
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+	ELX_LOOPBACK_XRI_SETUP,
+	ELX_LOOPBACK_DATA,
+};
+
+struct lpfc_dmabufext {
+	struct lpfc_dmabuf dma;
+	uint32_t size;
+	uint32_t flag;
+};
+
+/**
+ * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * the lpfc_bsg_send_mgmt_cmd function. It is called by the ring
+ * event handler function without any lock held, and can run in
+ * worker thread context as well as interrupt context. It can
+ * also be called from another thread which cleans up the SLI
+ * layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller, releases
+ * the resources held for the command, and then completes the
+ * bsg job back to userspace.
+ **/
+static void
+lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	unsigned long iflags;
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_bsg_iocb *iocb;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	iocb = &dd_data->context_un.iocb;
+	job = iocb->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	bmp = iocb->bmp;
+	rspiocbq = iocb->rspiocbq;
+	rsp = &rspiocbq->iocb;
+	ndlp = iocb->ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
 /**
- * lpfc_bsg_rport_ct - send a CT command from a bsg request
+ * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
  * @job: fc_bsg_job to handle
  */
 static int
-lpfc_bsg_rport_ct(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 {
-	struct Scsi_Host *shost = job->shost;
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_rport_data *rdata = job->rport->dd_data;
@@ -66,57 +221,60 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
 	struct scatterlist *sgel = NULL;
 	int numbde;
 	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
 	int rc = 0;
 
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2733 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
 	if (!lpfc_nlp_get(ndlp)) {
-		job->reply->result = -ENODEV;
-		return 0;
+		rc = -ENODEV;
+		goto no_ndlp;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_ndlp;
 	}
 
 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
 		rc = -ENODEV;
-		goto free_ndlp_exit;
+		goto free_bmp;
 	}
 
-	spin_lock_irq(shost->host_lock);
 	cmdiocbq = lpfc_sli_get_iocbq(phba);
 	if (!cmdiocbq) {
 		rc = -ENOMEM;
-		spin_unlock_irq(shost->host_lock);
-		goto free_ndlp_exit;
+		goto free_bmp;
 	}
-	cmd = &cmdiocbq->iocb;
 
+	cmd = &cmdiocbq->iocb;
 	rspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!rspiocbq) {
 		rc = -ENOMEM;
 		goto free_cmdiocbq;
 	}
-	spin_unlock_irq(shost->host_lock);
 
 	rsp = &rspiocbq->iocb;
-
-	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-	if (!bmp) {
-		rc = -ENOMEM;
-		spin_lock_irq(shost->host_lock);
-		goto free_rspiocbq;
-	}
-
-	spin_lock_irq(shost->host_lock);
 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
 	if (!bmp->virt) {
 		rc = -ENOMEM;
-		goto free_bmp;
+		goto free_rspiocbq;
 	}
-	spin_unlock_irq(shost->host_lock);
 
 	INIT_LIST_HEAD(&bmp->list);
 	bpl = (struct ulp_bde64 *) bmp->virt;
-
 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
@@ -158,72 +316,146 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
 	cmd->ulpContext = ndlp->nlp_rpi;
 	cmd->ulpOwner = OWN_CHIP;
 	cmdiocbq->vport = phba->pport;
-	cmdiocbq->context1 = NULL;
-	cmdiocbq->context2 = NULL;
+	cmdiocbq->context3 = bmp;
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
-
 	timeout = phba->fc_ratov * 2;
-	job->dd_data = cmdiocbq;
+	cmd->ulpTimeout = timeout;
 
-	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
-				      timeout + LPFC_DRVR_TIMEOUT);
-
-	if (rc != IOCB_TIMEDOUT) {
-		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.rspiocbq = rspiocbq;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = bmp;
+	dd_data->context_un.iocb.ndlp = ndlp;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
 	}
 
-	if (rc == IOCB_TIMEDOUT) {
-		lpfc_sli_release_iocbq(phba, rspiocbq);
-		rc = -EACCES;
-		goto free_ndlp_exit;
-	}
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc != IOCB_SUCCESS) {
-		rc = -EACCES;
-		goto free_outdmp;
-	}
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
 
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & 0xff) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
-				rc = -EACCES;
-				break;
-			}
-			goto free_outdmp;
-		}
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-free_outdmp:
-	spin_lock_irq(shost->host_lock);
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-free_bmp:
-	kfree(bmp);
+
 free_rspiocbq:
 	lpfc_sli_release_iocbq(phba, rspiocbq);
 free_cmdiocbq:
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
-	spin_unlock_irq(shost->host_lock);
-free_ndlp_exit:
+free_bmp:
+	kfree(bmp);
+free_ndlp:
 	lpfc_nlp_put(ndlp);
+no_ndlp:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * the lpfc_bsg_rport_els function. It is called by the ring
+ * event handler function without any lock held, and can run in
+ * worker thread context as well as interrupt context. It can
+ * also be called from another thread which cleans up the SLI
+ * layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller, releases
+ * the resources held for the command, and then completes the
+ * bsg job back to userspace.
+ **/
+static void
+lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *pbuflist = NULL;
+	struct fc_bsg_ctels_reply *els_reply;
+	uint8_t *rjt_data;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	/* normal completion and timeout crossed paths, already done */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+
+	job = dd_data->context_un.iocb.set_job;
+	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
+	rspiocbq = dd_data->context_un.iocb.rspiocbq;
+	rsp = &rspiocbq->iocb;
+	ndlp = dd_data->context_un.iocb.ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (job->reply->result == -EAGAIN)
+		rc = -EAGAIN;
+	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
+		job->reply->reply_payload_rcv_len =
+			rsp->un.elsreq64.bdl.bdeSize;
+	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+		job->reply->reply_payload_rcv_len =
+			sizeof(struct fc_bsg_ctels_reply);
+		/* LS_RJT data returned in word 4 */
+		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+		els_reply = &job->reply->reply_data.ctels_reply;
+		els_reply->status = FC_CTELS_STATUS_REJECT;
+		els_reply->rjt_data.action = rjt_data[3];
+		els_reply->rjt_data.reason_code = rjt_data[2];
+		els_reply->rjt_data.reason_explanation = rjt_data[1];
+		els_reply->rjt_data.vendor_unique = rjt_data[0];
+	} else
+		rc = -EIO;
 
+	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
+	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(dd_data);
 	/* make error code available to userspace */
 	job->reply->result = rc;
+	job->dd_data = NULL;
 	/* complete the job back to userspace */
 	job->job_done(job);
-
-	return 0;
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
 }
 
 /**
@@ -237,7 +469,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_rport_data *rdata = job->rport->dd_data;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
-
 	uint32_t elscmd;
 	uint32_t cmdsize;
 	uint32_t rspsize;
@@ -249,20 +480,30 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	struct lpfc_dmabuf *prsp;
 	struct lpfc_dmabuf *pbuflist = NULL;
 	struct ulp_bde64 *bpl;
-	int iocb_status;
 	int request_nseg;
 	int reply_nseg;
 	struct scatterlist *sgel = NULL;
 	int numbde;
 	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
 	int rc = 0;
 
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2735 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
 	if (!lpfc_nlp_get(ndlp)) {
 		rc = -ENODEV;
-		goto out;
+		goto free_dd_data;
 	}
 
 	elscmd = job->request->rqst_data.r_els.els_code;
@@ -272,24 +513,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	if (!rspiocbq) {
 		lpfc_nlp_put(ndlp);
 		rc = -ENOMEM;
-		goto out;
+		goto free_dd_data;
 	}
 
 	rsp = &rspiocbq->iocb;
 	rpi = ndlp->nlp_rpi;
 
-	cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
+	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
 				      ndlp->nlp_DID, elscmd);
-
 	if (!cmdiocbq) {
-		lpfc_sli_release_iocbq(phba, rspiocbq);
-		return -EIO;
+		rc = -EIO;
+		goto free_rspiocbq;
 	}
 
-	job->dd_data = cmdiocbq;
+	/* prep els iocb set context1 to the ndlp, context2 to the command
+	 * dmabuf, context3 holds the data dmabuf
+	 */
 	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
 	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-
 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
 	kfree(pcmd);
 	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
@@ -301,7 +542,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 
 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-
 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
 		busaddr = sg_dma_address(sgel);
 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
@@ -323,7 +563,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
 		bpl++;
 	}
-
 	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
 	cmdiocbq->iocb.ulpContext = rpi;
@@ -331,102 +570,54 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	cmdiocbq->context1 = NULL;
 	cmdiocbq->context2 = NULL;
 
-	iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
-					rspiocbq, (phba->fc_ratov * 2)
-					+ LPFC_DRVR_TIMEOUT);
-
-	/* release the new ndlp once the iocb completes */
+	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.rspiocbq = rspiocbq;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = NULL;
+	dd_data->context_un.iocb.ndlp = ndlp;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		creg_val = readl(phba->HCregaddr);
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 	lpfc_nlp_put(ndlp);
-	if (iocb_status != IOCB_TIMEDOUT) {
-		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	}
-
-	if (iocb_status == IOCB_SUCCESS) {
-		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
-			job->reply->reply_payload_rcv_len =
-				rsp->un.elsreq64.bdl.bdeSize;
-			rc = 0;
-		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
-			struct fc_bsg_ctels_reply *els_reply;
-			/* LS_RJT data returned in word 4 */
-			uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
-
-			els_reply = &job->reply->reply_data.ctels_reply;
-			job->reply->result = 0;
-			els_reply->status = FC_CTELS_STATUS_REJECT;
-			els_reply->rjt_data.action = rjt_data[0];
-			els_reply->rjt_data.reason_code = rjt_data[1];
-			els_reply->rjt_data.reason_explanation = rjt_data[2];
-			els_reply->rjt_data.vendor_unique = rjt_data[3];
-		} else
-			rc = -EIO;
-	} else
-		rc = -EIO;
-
-	if (iocb_status != IOCB_TIMEDOUT)
-		lpfc_els_free_iocb(phba, cmdiocbq);
-
-	lpfc_sli_release_iocbq(phba, rspiocbq);
-
-out:
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
-
-	return 0;
-}
-
-struct lpfc_ct_event {
-	struct list_head node;
-	int ref;
-	wait_queue_head_t wq;
-
-	/* Event type and waiter identifiers */
-	uint32_t type_mask;
-	uint32_t req_id;
-	uint32_t reg_id;
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
 
-	/* next two flags are here for the auto-delete logic */
-	unsigned long wait_time_stamp;
-	int waiting;
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-	/* seen and not seen events */
-	struct list_head events_to_get;
-	struct list_head events_to_see;
-};
+	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
 
-struct event_data {
-	struct list_head node;
-	uint32_t type;
-	uint32_t immed_dat;
-	void *data;
-	uint32_t len;
-};
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
 
-static struct lpfc_ct_event *
-lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
-{
-	struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
-	if (!evt)
-		return NULL;
+free_rspiocbq:
+	lpfc_sli_release_iocbq(phba, rspiocbq);
 
-	INIT_LIST_HEAD(&evt->events_to_get);
-	INIT_LIST_HEAD(&evt->events_to_see);
-	evt->req_id = ev_req_id;
-	evt->reg_id = ev_reg_id;
-	evt->wait_time_stamp = jiffies;
-	init_waitqueue_head(&evt->wq);
+free_dd_data:
+	kfree(dd_data);
 
-	return evt;
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
 }
 
 static void
-lpfc_ct_event_free(struct lpfc_ct_event *evt)
+lpfc_bsg_event_free(struct kref *kref)
 {
+	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
+						  kref);
 	struct event_data *ed;
 
 	list_del(&evt->node);
@@ -449,24 +640,62 @@ lpfc_ct_event_free(struct lpfc_ct_event *evt)
 }
 
 static inline void
-lpfc_ct_event_ref(struct lpfc_ct_event *evt)
+lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
 {
-	evt->ref++;
+	kref_get(&evt->kref);
 }
 
 static inline void
-lpfc_ct_event_unref(struct lpfc_ct_event *evt)
+lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
 {
-	if (--evt->ref < 0)
-		lpfc_ct_event_free(evt);
+	kref_put(&evt->kref, lpfc_bsg_event_free);
 }
 
-#define SLI_CT_ELX_LOOPBACK 0x10
+static struct lpfc_bsg_event *
+lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
+{
+	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
 
-enum ELX_LOOPBACK_CMD {
-	ELX_LOOPBACK_XRI_SETUP,
-	ELX_LOOPBACK_DATA,
-};
+	if (!evt)
+		return NULL;
+
+	INIT_LIST_HEAD(&evt->events_to_get);
+	INIT_LIST_HEAD(&evt->events_to_see);
+	evt->type_mask = ev_mask;
+	evt->req_id = ev_req_id;
+	evt->reg_id = ev_reg_id;
+	evt->wait_time_stamp = jiffies;
+	init_waitqueue_head(&evt->wq);
+	kref_init(&evt->kref);
+	return evt;
+}
+
+static int
+dfc_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
+{
+	struct lpfc_dmabufext *mlast;
+	struct pci_dev *pcidev;
+	struct list_head head, *curr, *next;
+
+	if ((!mlist) || (!lpfc_is_link_up(phba) &&
+	    (phba->link_flag & LS_LOOPBACK_MODE))) {
+		return 0;
+	}
+
+	pcidev = phba->pcidev;
+	list_add_tail(&head, &mlist->dma.list);
+
+	list_for_each_safe(curr, next, &head) {
+		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
+		if (mlast->dma.virt)
+			dma_free_coherent(&pcidev->dev,
+					  mlast->size,
+					  mlast->dma.virt,
+					  mlast->dma.phys);
+		kfree(mlast);
+	}
+	return 0;
+}
 
 /**
  * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
@@ -475,7 +704,7 @@ enum ELX_LOOPBACK_CMD {
  * @piocbq:
  *
  * This function is called when an unsolicited CT command is received. It
- * forwards the event to any processes registerd to receive CT events.
+ * forwards the event to any processes registered to receive CT events.
  */
 int
 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
@@ -485,7 +714,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	uint32_t cmd;
 	uint32_t len;
 	struct lpfc_dmabuf *dmabuf = NULL;
-	struct lpfc_ct_event *evt;
+	struct lpfc_bsg_event *evt;
 	struct event_data *evt_dat = NULL;
 	struct lpfc_iocbq *iocbq;
 	size_t offset = 0;
@@ -497,7 +726,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
 	struct lpfc_hbq_entry *hbqe;
 	struct lpfc_sli_ct_request *ct_req;
+	struct fc_bsg_job *job = NULL;
 	unsigned long flags;
+	int size = 0;
 
 	INIT_LIST_HEAD(&head);
 	list_add_tail(&head, &piocbq->list);
@@ -506,6 +737,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
 		goto error_ct_unsol_exit;
 
+	if (phba->link_state == LPFC_HBA_ERROR ||
+	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
+		goto error_ct_unsol_exit;
+
 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
 		dmabuf = bdeBuf1;
 	else {
@@ -513,7 +748,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 				   piocbq->iocb.un.cont64[0].addrLow);
 		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
 	}
-
+	if (dmabuf == NULL)
+		goto error_ct_unsol_exit;
 	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
 	evt_req_id = ct_req->FsType;
 	cmd = ct_req->CommandResponse.bits.CmdRsp;
@@ -523,22 +759,22 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
-		if (evt->req_id != evt_req_id)
+		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
+			evt->req_id != evt_req_id)
 			continue;
 
-		lpfc_ct_event_ref(evt);
-
+		lpfc_bsg_event_ref(evt);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
-		if (!evt_dat) {
-			lpfc_ct_event_unref(evt);
+		if (evt_dat == NULL) {
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+			lpfc_bsg_event_unref(evt);
 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 					"2614 Memory allocation failed for "
 					"CT event\n");
 			break;
 		}
 
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
 			/* take accumulated byte count from the last iocbq */
 			iocbq = list_entry(head.prev, typeof(*iocbq), list);
@@ -552,25 +788,25 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		}
 
 		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
-		if (!evt_dat->data) {
+		if (evt_dat->data == NULL) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 					"2615 Memory allocation failed for "
 					"CT event data, size %d\n",
 					evt_dat->len);
 			kfree(evt_dat);
 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
-			lpfc_ct_event_unref(evt);
+			lpfc_bsg_event_unref(evt);
 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 			goto error_ct_unsol_exit;
 		}
 
 		list_for_each_entry(iocbq, &head, list) {
+			size = 0;
 			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
 				bdeBuf1 = iocbq->context2;
 				bdeBuf2 = iocbq->context3;
 			}
 			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
-				int size = 0;
 				if (phba->sli3_options &
 				    LPFC_SLI3_HBQ_ENABLED) {
 					if (i == 0) {
@@ -605,7 +841,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 					kfree(evt_dat);
 					spin_lock_irqsave(&phba->ct_ev_lock,
 							  flags);
-					lpfc_ct_event_unref(evt);
+					lpfc_bsg_event_unref(evt);
 					spin_unlock_irqrestore(
 						&phba->ct_ev_lock, flags);
 					goto error_ct_unsol_exit;
@@ -620,15 +856,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 						dmabuf);
 				} else {
 					switch (cmd) {
+					case ELX_LOOPBACK_DATA:
+						dfc_cmd_data_free(phba,
+						(struct lpfc_dmabufext *)
+							dmabuf);
+						break;
 					case ELX_LOOPBACK_XRI_SETUP:
-						if (!(phba->sli3_options &
-						    LPFC_SLI3_HBQ_ENABLED))
+						if ((phba->sli_rev ==
+						    LPFC_SLI_REV2) ||
+						    (phba->sli3_options &
+						    LPFC_SLI3_HBQ_ENABLED
+						    )) {
+							lpfc_in_buf_free(phba,
+								dmabuf);
+						} else {
 							lpfc_post_buffer(phba,
 									 pring,
 									 1);
-						else
-							lpfc_in_buf_free(phba,
-								dmabuf);
+						}
 						break;
 					default:
 						if (!(phba->sli3_options &
@@ -655,49 +900,79 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 		evt_dat->type = FC_REG_CT_EVENT;
 		list_add(&evt_dat->node, &evt->events_to_see);
-		wake_up_interruptible(&evt->wq);
-		lpfc_ct_event_unref(evt);
-		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
+			wake_up_interruptible(&evt->wq);
+			lpfc_bsg_event_unref(evt);
 			break;
+		}
+
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		lpfc_bsg_event_unref(evt);
+
+		job = evt->set_job;
+		evt->set_job = NULL;
+		if (job) {
+			job->reply->reply_payload_rcv_len = size;
+			/* make error code available to userspace */
+			job->reply->result = 0;
+			job->dd_data = NULL;
+			/* complete the job back to userspace */
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+			job->job_done(job);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		}
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 error_ct_unsol_exit:
 	if (!list_empty(&head))
 		list_del(&head);
-
+	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+		return 0;
 	return 1;
 }
 
 /**
- * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
+ * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
  * @job: SET_EVENT fc_bsg_job
  */
 static int
-lpfc_bsg_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct set_ct_event *event_req;
-	struct lpfc_ct_event *evt;
-	unsigned long flags;
+	struct lpfc_bsg_event *evt;
 	int rc = 0;
+	struct bsg_job_data *dd_data = NULL;
+	uint32_t ev_mask;
+	unsigned long flags;
 
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 				"2612 Received SET_CT_EVENT below minimum "
 				"size\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (dd_data == NULL) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2734 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto job_error;
 	}
 
 	event_req = (struct set_ct_event *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
-
+	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
+		   FC_REG_EVENT_MASK);
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
-			lpfc_ct_event_ref(evt);
+			lpfc_bsg_event_ref(evt);
 			evt->wait_time_stamp = jiffies;
 			break;
 		}
@@ -706,73 +981,63 @@ lpfc_bsg_set_event(struct fc_bsg_job *job)
 
 	if (&evt->node == &phba->ct_ev_waiters) {
 		/* no event waiting struct yet - first call */
-		evt = lpfc_ct_event_new(event_req->ev_reg_id,
-					event_req->ev_req_id);
+		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
+					event_req->ev_req_id);
 		if (!evt) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 					"2617 Failed allocation of event "
 					"waiter\n");
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto job_error;
 		}
 
 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		list_add(&evt->node, &phba->ct_ev_waiters);
-		lpfc_ct_event_ref(evt);
+		lpfc_bsg_event_ref(evt);
+		evt->wait_time_stamp = jiffies;
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	}
 
-	evt->waiting = 1;
-	if (wait_event_interruptible(evt->wq,
-				!list_empty(&evt->events_to_see))) {
-		spin_lock_irqsave(&phba->ct_ev_lock, flags);
-		lpfc_ct_event_unref(evt); /* release ref */
-		lpfc_ct_event_unref(evt); /* delete */
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		rc = -EINTR;
-		goto set_event_out;
-	}
-
-	evt->wait_time_stamp = jiffies;
-	evt->waiting = 0;
-
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	list_move(evt->events_to_see.prev, &evt->events_to_get);
-	lpfc_ct_event_unref(evt); /* release ref */
+	evt->waiting = 1;
+	dd_data->type = TYPE_EVT;
+	dd_data->context_un.evt = evt;
+	evt->set_job = job; /* for unsolicited command */
+	job->dd_data = dd_data; /* for fc transport timeout callback*/
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return 0; /* call job done later */
 
-set_event_out:
-	/* set_event carries no reply payload */
-	job->reply->reply_payload_rcv_len = 0;
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
+job_error:
+	if (dd_data != NULL)
+		kfree(dd_data);
 
-	return 0;
+	job->dd_data = NULL;
+	return rc;
 }
 
 /**
- * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
+ * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
  * @job: GET_EVENT fc_bsg_job
  */
 static int
-lpfc_bsg_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct get_ct_event *event_req;
 	struct get_ct_event_reply *event_reply;
-	struct lpfc_ct_event *evt;
+	struct lpfc_bsg_event *evt;
 	struct event_data *evt_dat = NULL;
 	unsigned long flags;
-	int rc = 0;
+	uint32_t rc = 0;
 
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 				"2613 Received GET_CT_EVENT request below "
 				"minimum size\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto job_error;
 	}
 
 	event_req = (struct get_ct_event *)
@@ -780,13 +1045,12 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
 
 	event_reply = (struct get_ct_event_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;
-
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			if (list_empty(&evt->events_to_get))
 				break;
-			lpfc_ct_event_ref(evt);
+			lpfc_bsg_event_ref(evt);
 			evt->wait_time_stamp = jiffies;
 			evt_dat = list_entry(evt->events_to_get.prev,
 					     struct event_data, node);
@@ -796,44 +1060,49 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-	if (!evt_dat) {
+	/* The app may continue to ask for event data until it gets
+	 * an error indicating that there isn't anymore
+	 */
+	if (evt_dat == NULL) {
 		job->reply->reply_payload_rcv_len = 0;
 		rc = -ENOENT;
-		goto error_get_event_exit;
+		goto job_error;
 	}
 
-	if (evt_dat->len > job->reply_payload.payload_len) {
-		evt_dat->len = job->reply_payload.payload_len;
+	if (evt_dat->len > job->request_payload.payload_len) {
+		evt_dat->len = job->request_payload.payload_len;
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 				"2618 Truncated event data at %d "
 				"bytes\n",
-				job->reply_payload.payload_len);
+				job->request_payload.payload_len);
 	}
 
+	event_reply->type = evt_dat->type;
 	event_reply->immed_data = evt_dat->immed_dat;
-
 	if (evt_dat->len > 0)
 		job->reply->reply_payload_rcv_len =
-			sg_copy_from_buffer(job->reply_payload.sg_list,
-					    job->reply_payload.sg_cnt,
+			sg_copy_from_buffer(job->request_payload.sg_list,
+					    job->request_payload.sg_cnt,
 					    evt_dat->data, evt_dat->len);
 	else
 		job->reply->reply_payload_rcv_len = 0;
-	rc = 0;
 
-	if (evt_dat)
+	if (evt_dat) {
 		kfree(evt_dat->data);
 	kfree(evt_dat);
+	}
+
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	lpfc_ct_event_unref(evt);
+	lpfc_bsg_event_unref(evt);
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
-error_get_event_exit:
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
+	job->dd_data = NULL;
+	job->reply->result = 0;
 	job->job_done(job);
+	return 0;
 
+job_error:
+	job->dd_data = NULL;
+	job->reply->result = rc;
 	return rc;
 }
 
@@ -845,19 +1114,25 @@ static int
 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
 {
 	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+	int rc;
 
 	switch (command) {
 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
-		return lpfc_bsg_set_event(job);
+		rc = lpfc_bsg_hba_set_event(job);
 		break;
 
 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
-		return lpfc_bsg_get_event(job);
+		rc = lpfc_bsg_hba_get_event(job);
 		break;
-
 	default:
-		return -EINVAL;
+		rc = -EINVAL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		job->reply->result = rc;
+		break;
 	}
+
+	return rc;
 }
 
 /**
@@ -868,10 +1143,9 @@ int
 lpfc_bsg_request(struct fc_bsg_job *job)
 {
 	uint32_t msgcode;
-	int rc = -EINVAL;
+	int rc;
 
 	msgcode = job->request->msgcode;
-
 	switch (msgcode) {
 	case FC_BSG_HST_VENDOR:
 		rc = lpfc_bsg_hst_vendor(job);
@@ -880,9 +1154,13 @@ lpfc_bsg_request(struct fc_bsg_job *job)
 		rc = lpfc_bsg_rport_els(job);
 		break;
 	case FC_BSG_RPT_CT:
-		rc = lpfc_bsg_rport_ct(job);
+		rc = lpfc_bsg_send_mgmt_cmd(job);
 		break;
 	default:
+		rc = -EINVAL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		job->reply->result = rc;
 		break;
 	}
 
@@ -901,11 +1179,54 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
+	struct lpfc_iocbq *cmdiocb;
+	struct lpfc_bsg_event *evt;
+	struct lpfc_bsg_iocb *iocb;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct bsg_job_data *dd_data;
+	unsigned long flags;
 
-	if (cmdiocb)
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = (struct bsg_job_data *)job->dd_data;
+	/* timeout and completion crossed paths if no dd_data */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return 0;
+	}
+
+	switch (dd_data->type) {
+	case TYPE_IOCB:
+		iocb = &dd_data->context_un.iocb;
+		cmdiocb = iocb->cmdiocbq;
+		/* hint to completion handler that the job timed out */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		/* this will call our completion handler */
+		spin_lock_irq(&phba->hbalock);
 		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
+		break;
+	case TYPE_EVT:
+		evt = dd_data->context_un.evt;
+		/* this event has no job anymore */
+		evt->set_job = NULL;
+		job->dd_data = NULL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* Return -EAGAIN which is our way of signalling the
+		 * app to retry.
+		 */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		job->job_done(job);
+		break;
+	default:
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		break;
+	}
 
+	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
+	 * otherwise an error message will be displayed on the console
+	 * so always return success (zero)
+	 */
 	return 0;
 }
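
A closing note on the event bookkeeping: the patch also retires the hand-rolled reference counter (the old lpfc_ct_event_unref() freed the event only once --evt->ref went negative) in favor of a struct kref, so the final lpfc_bsg_event_unref() releases the event through lpfc_bsg_event_free(). Below is a stand-alone model of that get/put-with-release idiom; event_t and its helpers are illustrative names, not kernel or lpfc symbols.

	/* Illustrative model of the kref get/put idiom adopted by this
	 * patch; all names here are stand-ins, not kernel symbols. */
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	typedef struct event {
		atomic_int refcount;          /* models struct kref */
	} event_t;

	static event_t *event_new(void)
	{
		event_t *evt = calloc(1, sizeof(*evt));

		if (evt)
			atomic_init(&evt->refcount, 1); /* like kref_init() */
		return evt;
	}

	static void event_ref(event_t *evt)     /* like kref_get() */
	{
		atomic_fetch_add(&evt->refcount, 1);
	}

	static void event_unref(event_t *evt)   /* like kref_put() */
	{
		/* fetch_sub returns the old value: 1 means we dropped
		 * the last reference, so run the release callback. */
		if (atomic_fetch_sub(&evt->refcount, 1) == 1) {
			puts("last reference dropped: freeing event");
			free(evt);
		}
	}

	int main(void)
	{
		event_t *evt = event_new();

		if (!evt)
			return 1;
		event_ref(evt);    /* a waiter takes a reference    */
		event_unref(evt);  /* waiter done                   */
		event_unref(evt);  /* creator done -> event freed   */
		return 0;
	}

The kref convention frees at exactly zero with the initial reference counting as one, which is what lpfc_bsg_event_new() establishes via kref_init(); the old counter's free-below-zero rule effectively encoded an extra implicit reference and was easy to misread.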