diff options
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_bsg.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_bsg.c | 2811 |
1 files changed, 2514 insertions, 297 deletions
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index da6bf5aac9dd..d62b3e467926 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | 4 | * Copyright (C) 2009-2010 Emulex. All rights reserved. * |
5 | * EMULEX and SLI are trademarks of Emulex. * | 5 | * EMULEX and SLI are trademarks of Emulex. * |
6 | * www.emulex.com * | 6 | * www.emulex.com * |
7 | * * | 7 | * * |
@@ -21,17 +21,21 @@ | |||
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/mempool.h> | 22 | #include <linux/mempool.h> |
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/slab.h> | ||
25 | #include <linux/delay.h> | ||
24 | 26 | ||
25 | #include <scsi/scsi.h> | 27 | #include <scsi/scsi.h> |
26 | #include <scsi/scsi_host.h> | 28 | #include <scsi/scsi_host.h> |
27 | #include <scsi/scsi_transport_fc.h> | 29 | #include <scsi/scsi_transport_fc.h> |
28 | #include <scsi/scsi_bsg_fc.h> | 30 | #include <scsi/scsi_bsg_fc.h> |
31 | #include <scsi/fc/fc_fs.h> | ||
29 | 32 | ||
30 | #include "lpfc_hw4.h" | 33 | #include "lpfc_hw4.h" |
31 | #include "lpfc_hw.h" | 34 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 35 | #include "lpfc_sli.h" |
33 | #include "lpfc_sli4.h" | 36 | #include "lpfc_sli4.h" |
34 | #include "lpfc_nl.h" | 37 | #include "lpfc_nl.h" |
38 | #include "lpfc_bsg.h" | ||
35 | #include "lpfc_disc.h" | 39 | #include "lpfc_disc.h" |
36 | #include "lpfc_scsi.h" | 40 | #include "lpfc_scsi.h" |
37 | #include "lpfc.h" | 41 | #include "lpfc.h" |
@@ -40,14 +44,196 @@ | |||
40 | #include "lpfc_vport.h" | 44 | #include "lpfc_vport.h" |
41 | #include "lpfc_version.h" | 45 | #include "lpfc_version.h" |
42 | 46 | ||
47 | struct lpfc_bsg_event { | ||
48 | struct list_head node; | ||
49 | struct kref kref; | ||
50 | wait_queue_head_t wq; | ||
51 | |||
52 | /* Event type and waiter identifiers */ | ||
53 | uint32_t type_mask; | ||
54 | uint32_t req_id; | ||
55 | uint32_t reg_id; | ||
56 | |||
57 | /* next two flags are here for the auto-delete logic */ | ||
58 | unsigned long wait_time_stamp; | ||
59 | int waiting; | ||
60 | |||
61 | /* seen and not seen events */ | ||
62 | struct list_head events_to_get; | ||
63 | struct list_head events_to_see; | ||
64 | |||
65 | /* job waiting for this event to finish */ | ||
66 | struct fc_bsg_job *set_job; | ||
67 | }; | ||
68 | |||
69 | struct lpfc_bsg_iocb { | ||
70 | struct lpfc_iocbq *cmdiocbq; | ||
71 | struct lpfc_iocbq *rspiocbq; | ||
72 | struct lpfc_dmabuf *bmp; | ||
73 | struct lpfc_nodelist *ndlp; | ||
74 | |||
75 | /* job waiting for this iocb to finish */ | ||
76 | struct fc_bsg_job *set_job; | ||
77 | }; | ||
78 | |||
79 | struct lpfc_bsg_mbox { | ||
80 | LPFC_MBOXQ_t *pmboxq; | ||
81 | MAILBOX_t *mb; | ||
82 | |||
83 | /* job waiting for this mbox command to finish */ | ||
84 | struct fc_bsg_job *set_job; | ||
85 | }; | ||
86 | |||
87 | #define MENLO_DID 0x0000FC0E | ||
88 | |||
89 | struct lpfc_bsg_menlo { | ||
90 | struct lpfc_iocbq *cmdiocbq; | ||
91 | struct lpfc_iocbq *rspiocbq; | ||
92 | struct lpfc_dmabuf *bmp; | ||
93 | |||
94 | /* job waiting for this iocb to finish */ | ||
95 | struct fc_bsg_job *set_job; | ||
96 | }; | ||
97 | |||
98 | #define TYPE_EVT 1 | ||
99 | #define TYPE_IOCB 2 | ||
100 | #define TYPE_MBOX 3 | ||
101 | #define TYPE_MENLO 4 | ||
102 | struct bsg_job_data { | ||
103 | uint32_t type; | ||
104 | union { | ||
105 | struct lpfc_bsg_event *evt; | ||
106 | struct lpfc_bsg_iocb iocb; | ||
107 | struct lpfc_bsg_mbox mbox; | ||
108 | struct lpfc_bsg_menlo menlo; | ||
109 | } context_un; | ||
110 | }; | ||
111 | |||
112 | struct event_data { | ||
113 | struct list_head node; | ||
114 | uint32_t type; | ||
115 | uint32_t immed_dat; | ||
116 | void *data; | ||
117 | uint32_t len; | ||
118 | }; | ||
119 | |||
120 | #define BUF_SZ_4K 4096 | ||
121 | #define SLI_CT_ELX_LOOPBACK 0x10 | ||
122 | |||
123 | enum ELX_LOOPBACK_CMD { | ||
124 | ELX_LOOPBACK_XRI_SETUP, | ||
125 | ELX_LOOPBACK_DATA, | ||
126 | }; | ||
127 | |||
128 | #define ELX_LOOPBACK_HEADER_SZ \ | ||
129 | (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) | ||
130 | |||
131 | struct lpfc_dmabufext { | ||
132 | struct lpfc_dmabuf dma; | ||
133 | uint32_t size; | ||
134 | uint32_t flag; | ||
135 | }; | ||
136 | |||
137 | /** | ||
138 | * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler | ||
139 | * @phba: Pointer to HBA context object. | ||
140 | * @cmdiocbq: Pointer to command iocb. | ||
141 | * @rspiocbq: Pointer to response iocb. | ||
142 | * | ||
143 | * This function is the completion handler for iocbs issued using | ||
144 | * lpfc_bsg_send_mgmt_cmd function. This function is called by the | ||
145 | * ring event handler function without any lock held. This function | ||
146 | * can be called from both worker thread context and interrupt | ||
147 | * context. This function also can be called from another thread which | ||
148 | * cleans up the SLI layer objects. | ||
149 | * This function copies the contents of the response iocb to the | ||
150 | * response iocb memory object provided by the caller of | ||
151 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
152 | * sleeps for the iocb completion. | ||
153 | **/ | ||
154 | static void | ||
155 | lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, | ||
156 | struct lpfc_iocbq *cmdiocbq, | ||
157 | struct lpfc_iocbq *rspiocbq) | ||
158 | { | ||
159 | unsigned long iflags; | ||
160 | struct bsg_job_data *dd_data; | ||
161 | struct fc_bsg_job *job; | ||
162 | IOCB_t *rsp; | ||
163 | struct lpfc_dmabuf *bmp; | ||
164 | struct lpfc_nodelist *ndlp; | ||
165 | struct lpfc_bsg_iocb *iocb; | ||
166 | unsigned long flags; | ||
167 | int rc = 0; | ||
168 | |||
169 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
170 | dd_data = cmdiocbq->context1; | ||
171 | if (!dd_data) { | ||
172 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
173 | return; | ||
174 | } | ||
175 | |||
176 | iocb = &dd_data->context_un.iocb; | ||
177 | job = iocb->set_job; | ||
178 | job->dd_data = NULL; /* so timeout handler does not reply */ | ||
179 | |||
180 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
181 | cmdiocbq->iocb_flag |= LPFC_IO_WAKE; | ||
182 | if (cmdiocbq->context2 && rspiocbq) | ||
183 | memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, | ||
184 | &rspiocbq->iocb, sizeof(IOCB_t)); | ||
185 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
186 | |||
187 | bmp = iocb->bmp; | ||
188 | rspiocbq = iocb->rspiocbq; | ||
189 | rsp = &rspiocbq->iocb; | ||
190 | ndlp = iocb->ndlp; | ||
191 | |||
192 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
193 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
194 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | ||
195 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
196 | |||
197 | if (rsp->ulpStatus) { | ||
198 | if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | ||
199 | switch (rsp->un.ulpWord[4] & 0xff) { | ||
200 | case IOERR_SEQUENCE_TIMEOUT: | ||
201 | rc = -ETIMEDOUT; | ||
202 | break; | ||
203 | case IOERR_INVALID_RPI: | ||
204 | rc = -EFAULT; | ||
205 | break; | ||
206 | default: | ||
207 | rc = -EACCES; | ||
208 | break; | ||
209 | } | ||
210 | } else | ||
211 | rc = -EACCES; | ||
212 | } else | ||
213 | job->reply->reply_payload_rcv_len = | ||
214 | rsp->un.genreq64.bdl.bdeSize; | ||
215 | |||
216 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | ||
217 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
218 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
219 | lpfc_nlp_put(ndlp); | ||
220 | kfree(bmp); | ||
221 | kfree(dd_data); | ||
222 | /* make error code available to userspace */ | ||
223 | job->reply->result = rc; | ||
224 | /* complete the job back to userspace */ | ||
225 | job->job_done(job); | ||
226 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
227 | return; | ||
228 | } | ||
229 | |||
43 | /** | 230 | /** |
44 | * lpfc_bsg_rport_ct - send a CT command from a bsg request | 231 | * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request |
45 | * @job: fc_bsg_job to handle | 232 | * @job: fc_bsg_job to handle |
46 | */ | 233 | **/ |
47 | static int | 234 | static int |
48 | lpfc_bsg_rport_ct(struct fc_bsg_job *job) | 235 | lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) |
49 | { | 236 | { |
50 | struct Scsi_Host *shost = job->shost; | ||
51 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | 237 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; |
52 | struct lpfc_hba *phba = vport->phba; | 238 | struct lpfc_hba *phba = vport->phba; |
53 | struct lpfc_rport_data *rdata = job->rport->dd_data; | 239 | struct lpfc_rport_data *rdata = job->rport->dd_data; |
@@ -64,57 +250,60 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job) | |||
64 | struct scatterlist *sgel = NULL; | 250 | struct scatterlist *sgel = NULL; |
65 | int numbde; | 251 | int numbde; |
66 | dma_addr_t busaddr; | 252 | dma_addr_t busaddr; |
253 | struct bsg_job_data *dd_data; | ||
254 | uint32_t creg_val; | ||
67 | int rc = 0; | 255 | int rc = 0; |
68 | 256 | ||
69 | /* in case no data is transferred */ | 257 | /* in case no data is transferred */ |
70 | job->reply->reply_payload_rcv_len = 0; | 258 | job->reply->reply_payload_rcv_len = 0; |
71 | 259 | ||
260 | /* allocate our bsg tracking structure */ | ||
261 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
262 | if (!dd_data) { | ||
263 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
264 | "2733 Failed allocation of dd_data\n"); | ||
265 | rc = -ENOMEM; | ||
266 | goto no_dd_data; | ||
267 | } | ||
268 | |||
72 | if (!lpfc_nlp_get(ndlp)) { | 269 | if (!lpfc_nlp_get(ndlp)) { |
73 | job->reply->result = -ENODEV; | 270 | rc = -ENODEV; |
74 | return 0; | 271 | goto no_ndlp; |
272 | } | ||
273 | |||
274 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
275 | if (!bmp) { | ||
276 | rc = -ENOMEM; | ||
277 | goto free_ndlp; | ||
75 | } | 278 | } |
76 | 279 | ||
77 | if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { | 280 | if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { |
78 | rc = -ENODEV; | 281 | rc = -ENODEV; |
79 | goto free_ndlp_exit; | 282 | goto free_bmp; |
80 | } | 283 | } |
81 | 284 | ||
82 | spin_lock_irq(shost->host_lock); | ||
83 | cmdiocbq = lpfc_sli_get_iocbq(phba); | 285 | cmdiocbq = lpfc_sli_get_iocbq(phba); |
84 | if (!cmdiocbq) { | 286 | if (!cmdiocbq) { |
85 | rc = -ENOMEM; | 287 | rc = -ENOMEM; |
86 | spin_unlock_irq(shost->host_lock); | 288 | goto free_bmp; |
87 | goto free_ndlp_exit; | ||
88 | } | 289 | } |
89 | cmd = &cmdiocbq->iocb; | ||
90 | 290 | ||
291 | cmd = &cmdiocbq->iocb; | ||
91 | rspiocbq = lpfc_sli_get_iocbq(phba); | 292 | rspiocbq = lpfc_sli_get_iocbq(phba); |
92 | if (!rspiocbq) { | 293 | if (!rspiocbq) { |
93 | rc = -ENOMEM; | 294 | rc = -ENOMEM; |
94 | goto free_cmdiocbq; | 295 | goto free_cmdiocbq; |
95 | } | 296 | } |
96 | spin_unlock_irq(shost->host_lock); | ||
97 | 297 | ||
98 | rsp = &rspiocbq->iocb; | 298 | rsp = &rspiocbq->iocb; |
99 | |||
100 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
101 | if (!bmp) { | ||
102 | rc = -ENOMEM; | ||
103 | spin_lock_irq(shost->host_lock); | ||
104 | goto free_rspiocbq; | ||
105 | } | ||
106 | |||
107 | spin_lock_irq(shost->host_lock); | ||
108 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); | 299 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); |
109 | if (!bmp->virt) { | 300 | if (!bmp->virt) { |
110 | rc = -ENOMEM; | 301 | rc = -ENOMEM; |
111 | goto free_bmp; | 302 | goto free_rspiocbq; |
112 | } | 303 | } |
113 | spin_unlock_irq(shost->host_lock); | ||
114 | 304 | ||
115 | INIT_LIST_HEAD(&bmp->list); | 305 | INIT_LIST_HEAD(&bmp->list); |
116 | bpl = (struct ulp_bde64 *) bmp->virt; | 306 | bpl = (struct ulp_bde64 *) bmp->virt; |
117 | |||
118 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, | 307 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, |
119 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 308 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
120 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { | 309 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { |
@@ -148,86 +337,160 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job) | |||
148 | cmd->ulpCommand = CMD_GEN_REQUEST64_CR; | 337 | cmd->ulpCommand = CMD_GEN_REQUEST64_CR; |
149 | cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); | 338 | cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); |
150 | cmd->un.genreq64.w5.hcsw.Dfctl = 0; | 339 | cmd->un.genreq64.w5.hcsw.Dfctl = 0; |
151 | cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL; | 340 | cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; |
152 | cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP; | 341 | cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; |
153 | cmd->ulpBdeCount = 1; | 342 | cmd->ulpBdeCount = 1; |
154 | cmd->ulpLe = 1; | 343 | cmd->ulpLe = 1; |
155 | cmd->ulpClass = CLASS3; | 344 | cmd->ulpClass = CLASS3; |
156 | cmd->ulpContext = ndlp->nlp_rpi; | 345 | cmd->ulpContext = ndlp->nlp_rpi; |
157 | cmd->ulpOwner = OWN_CHIP; | 346 | cmd->ulpOwner = OWN_CHIP; |
158 | cmdiocbq->vport = phba->pport; | 347 | cmdiocbq->vport = phba->pport; |
159 | cmdiocbq->context1 = NULL; | 348 | cmdiocbq->context3 = bmp; |
160 | cmdiocbq->context2 = NULL; | ||
161 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; | 349 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; |
162 | |||
163 | timeout = phba->fc_ratov * 2; | 350 | timeout = phba->fc_ratov * 2; |
164 | job->dd_data = cmdiocbq; | 351 | cmd->ulpTimeout = timeout; |
165 | 352 | ||
166 | rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, | 353 | cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; |
167 | timeout + LPFC_DRVR_TIMEOUT); | 354 | cmdiocbq->context1 = dd_data; |
168 | 355 | cmdiocbq->context2 = rspiocbq; | |
169 | if (rc != IOCB_TIMEDOUT) { | 356 | dd_data->type = TYPE_IOCB; |
170 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 357 | dd_data->context_un.iocb.cmdiocbq = cmdiocbq; |
171 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 358 | dd_data->context_un.iocb.rspiocbq = rspiocbq; |
172 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | 359 | dd_data->context_un.iocb.set_job = job; |
173 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | 360 | dd_data->context_un.iocb.bmp = bmp; |
361 | dd_data->context_un.iocb.ndlp = ndlp; | ||
362 | |||
363 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | ||
364 | creg_val = readl(phba->HCregaddr); | ||
365 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | ||
366 | writel(creg_val, phba->HCregaddr); | ||
367 | readl(phba->HCregaddr); /* flush */ | ||
174 | } | 368 | } |
175 | 369 | ||
176 | if (rc == IOCB_TIMEDOUT) { | 370 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); |
177 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
178 | rc = -EACCES; | ||
179 | goto free_ndlp_exit; | ||
180 | } | ||
181 | 371 | ||
182 | if (rc != IOCB_SUCCESS) { | 372 | if (rc == IOCB_SUCCESS) |
183 | rc = -EACCES; | 373 | return 0; /* done for now */ |
184 | goto free_outdmp; | ||
185 | } | ||
186 | 374 | ||
187 | if (rsp->ulpStatus) { | 375 | /* iocb failed so cleanup */ |
188 | if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | 376 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
189 | switch (rsp->un.ulpWord[4] & 0xff) { | 377 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
190 | case IOERR_SEQUENCE_TIMEOUT: | 378 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, |
191 | rc = -ETIMEDOUT; | 379 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); |
192 | break; | ||
193 | case IOERR_INVALID_RPI: | ||
194 | rc = -EFAULT; | ||
195 | break; | ||
196 | default: | ||
197 | rc = -EACCES; | ||
198 | break; | ||
199 | } | ||
200 | goto free_outdmp; | ||
201 | } | ||
202 | } else | ||
203 | job->reply->reply_payload_rcv_len = | ||
204 | rsp->un.genreq64.bdl.bdeSize; | ||
205 | 380 | ||
206 | free_outdmp: | ||
207 | spin_lock_irq(shost->host_lock); | ||
208 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 381 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
209 | free_bmp: | 382 | |
210 | kfree(bmp); | ||
211 | free_rspiocbq: | 383 | free_rspiocbq: |
212 | lpfc_sli_release_iocbq(phba, rspiocbq); | 384 | lpfc_sli_release_iocbq(phba, rspiocbq); |
213 | free_cmdiocbq: | 385 | free_cmdiocbq: |
214 | lpfc_sli_release_iocbq(phba, cmdiocbq); | 386 | lpfc_sli_release_iocbq(phba, cmdiocbq); |
215 | spin_unlock_irq(shost->host_lock); | 387 | free_bmp: |
216 | free_ndlp_exit: | 388 | kfree(bmp); |
389 | free_ndlp: | ||
217 | lpfc_nlp_put(ndlp); | 390 | lpfc_nlp_put(ndlp); |
391 | no_ndlp: | ||
392 | kfree(dd_data); | ||
393 | no_dd_data: | ||
394 | /* make error code available to userspace */ | ||
395 | job->reply->result = rc; | ||
396 | job->dd_data = NULL; | ||
397 | return rc; | ||
398 | } | ||
218 | 399 | ||
400 | /** | ||
401 | * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler | ||
402 | * @phba: Pointer to HBA context object. | ||
403 | * @cmdiocbq: Pointer to command iocb. | ||
404 | * @rspiocbq: Pointer to response iocb. | ||
405 | * | ||
406 | * This function is the completion handler for iocbs issued using | ||
407 | * lpfc_bsg_rport_els_cmp function. This function is called by the | ||
408 | * ring event handler function without any lock held. This function | ||
409 | * can be called from both worker thread context and interrupt | ||
410 | * context. This function also can be called from other thread which | ||
411 | * cleans up the SLI layer objects. | ||
412 | * This function copies the contents of the response iocb to the | ||
413 | * response iocb memory object provided by the caller of | ||
414 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
415 | * sleeps for the iocb completion. | ||
416 | **/ | ||
417 | static void | ||
418 | lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, | ||
419 | struct lpfc_iocbq *cmdiocbq, | ||
420 | struct lpfc_iocbq *rspiocbq) | ||
421 | { | ||
422 | struct bsg_job_data *dd_data; | ||
423 | struct fc_bsg_job *job; | ||
424 | IOCB_t *rsp; | ||
425 | struct lpfc_nodelist *ndlp; | ||
426 | struct lpfc_dmabuf *pbuflist = NULL; | ||
427 | struct fc_bsg_ctels_reply *els_reply; | ||
428 | uint8_t *rjt_data; | ||
429 | unsigned long flags; | ||
430 | int rc = 0; | ||
431 | |||
432 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
433 | dd_data = cmdiocbq->context1; | ||
434 | /* normal completion and timeout crossed paths, already done */ | ||
435 | if (!dd_data) { | ||
436 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
437 | return; | ||
438 | } | ||
439 | |||
440 | cmdiocbq->iocb_flag |= LPFC_IO_WAKE; | ||
441 | if (cmdiocbq->context2 && rspiocbq) | ||
442 | memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, | ||
443 | &rspiocbq->iocb, sizeof(IOCB_t)); | ||
444 | |||
445 | job = dd_data->context_un.iocb.set_job; | ||
446 | cmdiocbq = dd_data->context_un.iocb.cmdiocbq; | ||
447 | rspiocbq = dd_data->context_un.iocb.rspiocbq; | ||
448 | rsp = &rspiocbq->iocb; | ||
449 | ndlp = dd_data->context_un.iocb.ndlp; | ||
450 | |||
451 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
452 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
453 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | ||
454 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
455 | |||
456 | if (job->reply->result == -EAGAIN) | ||
457 | rc = -EAGAIN; | ||
458 | else if (rsp->ulpStatus == IOSTAT_SUCCESS) | ||
459 | job->reply->reply_payload_rcv_len = | ||
460 | rsp->un.elsreq64.bdl.bdeSize; | ||
461 | else if (rsp->ulpStatus == IOSTAT_LS_RJT) { | ||
462 | job->reply->reply_payload_rcv_len = | ||
463 | sizeof(struct fc_bsg_ctels_reply); | ||
464 | /* LS_RJT data returned in word 4 */ | ||
465 | rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; | ||
466 | els_reply = &job->reply->reply_data.ctels_reply; | ||
467 | els_reply->status = FC_CTELS_STATUS_REJECT; | ||
468 | els_reply->rjt_data.action = rjt_data[3]; | ||
469 | els_reply->rjt_data.reason_code = rjt_data[2]; | ||
470 | els_reply->rjt_data.reason_explanation = rjt_data[1]; | ||
471 | els_reply->rjt_data.vendor_unique = rjt_data[0]; | ||
472 | } else | ||
473 | rc = -EIO; | ||
474 | |||
475 | pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; | ||
476 | lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); | ||
477 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
478 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
479 | lpfc_nlp_put(ndlp); | ||
480 | kfree(dd_data); | ||
219 | /* make error code available to userspace */ | 481 | /* make error code available to userspace */ |
220 | job->reply->result = rc; | 482 | job->reply->result = rc; |
483 | job->dd_data = NULL; | ||
221 | /* complete the job back to userspace */ | 484 | /* complete the job back to userspace */ |
222 | job->job_done(job); | 485 | job->job_done(job); |
223 | 486 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | |
224 | return 0; | 487 | return; |
225 | } | 488 | } |
226 | 489 | ||
227 | /** | 490 | /** |
228 | * lpfc_bsg_rport_els - send an ELS command from a bsg request | 491 | * lpfc_bsg_rport_els - send an ELS command from a bsg request |
229 | * @job: fc_bsg_job to handle | 492 | * @job: fc_bsg_job to handle |
230 | */ | 493 | **/ |
231 | static int | 494 | static int |
232 | lpfc_bsg_rport_els(struct fc_bsg_job *job) | 495 | lpfc_bsg_rport_els(struct fc_bsg_job *job) |
233 | { | 496 | { |
@@ -235,7 +498,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
235 | struct lpfc_hba *phba = vport->phba; | 498 | struct lpfc_hba *phba = vport->phba; |
236 | struct lpfc_rport_data *rdata = job->rport->dd_data; | 499 | struct lpfc_rport_data *rdata = job->rport->dd_data; |
237 | struct lpfc_nodelist *ndlp = rdata->pnode; | 500 | struct lpfc_nodelist *ndlp = rdata->pnode; |
238 | |||
239 | uint32_t elscmd; | 501 | uint32_t elscmd; |
240 | uint32_t cmdsize; | 502 | uint32_t cmdsize; |
241 | uint32_t rspsize; | 503 | uint32_t rspsize; |
@@ -247,20 +509,30 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
247 | struct lpfc_dmabuf *prsp; | 509 | struct lpfc_dmabuf *prsp; |
248 | struct lpfc_dmabuf *pbuflist = NULL; | 510 | struct lpfc_dmabuf *pbuflist = NULL; |
249 | struct ulp_bde64 *bpl; | 511 | struct ulp_bde64 *bpl; |
250 | int iocb_status; | ||
251 | int request_nseg; | 512 | int request_nseg; |
252 | int reply_nseg; | 513 | int reply_nseg; |
253 | struct scatterlist *sgel = NULL; | 514 | struct scatterlist *sgel = NULL; |
254 | int numbde; | 515 | int numbde; |
255 | dma_addr_t busaddr; | 516 | dma_addr_t busaddr; |
517 | struct bsg_job_data *dd_data; | ||
518 | uint32_t creg_val; | ||
256 | int rc = 0; | 519 | int rc = 0; |
257 | 520 | ||
258 | /* in case no data is transferred */ | 521 | /* in case no data is transferred */ |
259 | job->reply->reply_payload_rcv_len = 0; | 522 | job->reply->reply_payload_rcv_len = 0; |
260 | 523 | ||
524 | /* allocate our bsg tracking structure */ | ||
525 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
526 | if (!dd_data) { | ||
527 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
528 | "2735 Failed allocation of dd_data\n"); | ||
529 | rc = -ENOMEM; | ||
530 | goto no_dd_data; | ||
531 | } | ||
532 | |||
261 | if (!lpfc_nlp_get(ndlp)) { | 533 | if (!lpfc_nlp_get(ndlp)) { |
262 | rc = -ENODEV; | 534 | rc = -ENODEV; |
263 | goto out; | 535 | goto free_dd_data; |
264 | } | 536 | } |
265 | 537 | ||
266 | elscmd = job->request->rqst_data.r_els.els_code; | 538 | elscmd = job->request->rqst_data.r_els.els_code; |
@@ -270,24 +542,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
270 | if (!rspiocbq) { | 542 | if (!rspiocbq) { |
271 | lpfc_nlp_put(ndlp); | 543 | lpfc_nlp_put(ndlp); |
272 | rc = -ENOMEM; | 544 | rc = -ENOMEM; |
273 | goto out; | 545 | goto free_dd_data; |
274 | } | 546 | } |
275 | 547 | ||
276 | rsp = &rspiocbq->iocb; | 548 | rsp = &rspiocbq->iocb; |
277 | rpi = ndlp->nlp_rpi; | 549 | rpi = ndlp->nlp_rpi; |
278 | 550 | ||
279 | cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp, | 551 | cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, |
280 | ndlp->nlp_DID, elscmd); | 552 | ndlp->nlp_DID, elscmd); |
281 | |||
282 | if (!cmdiocbq) { | 553 | if (!cmdiocbq) { |
283 | lpfc_sli_release_iocbq(phba, rspiocbq); | 554 | rc = -EIO; |
284 | return -EIO; | 555 | goto free_rspiocbq; |
285 | } | 556 | } |
286 | 557 | ||
287 | job->dd_data = cmdiocbq; | 558 | /* prep els iocb set context1 to the ndlp, context2 to the command |
559 | * dmabuf, context3 holds the data dmabuf | ||
560 | */ | ||
288 | pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; | 561 | pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; |
289 | prsp = (struct lpfc_dmabuf *) pcmd->list.next; | 562 | prsp = (struct lpfc_dmabuf *) pcmd->list.next; |
290 | |||
291 | lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); | 563 | lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); |
292 | kfree(pcmd); | 564 | kfree(pcmd); |
293 | lpfc_mbuf_free(phba, prsp->virt, prsp->phys); | 565 | lpfc_mbuf_free(phba, prsp->virt, prsp->phys); |
@@ -299,7 +571,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
299 | 571 | ||
300 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, | 572 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, |
301 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 573 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
302 | |||
303 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { | 574 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { |
304 | busaddr = sg_dma_address(sgel); | 575 | busaddr = sg_dma_address(sgel); |
305 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 576 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
@@ -321,7 +592,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
321 | bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); | 592 | bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); |
322 | bpl++; | 593 | bpl++; |
323 | } | 594 | } |
324 | |||
325 | cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = | 595 | cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = |
326 | (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); | 596 | (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); |
327 | cmdiocbq->iocb.ulpContext = rpi; | 597 | cmdiocbq->iocb.ulpContext = rpi; |
@@ -329,102 +599,62 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
329 | cmdiocbq->context1 = NULL; | 599 | cmdiocbq->context1 = NULL; |
330 | cmdiocbq->context2 = NULL; | 600 | cmdiocbq->context2 = NULL; |
331 | 601 | ||
332 | iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, | 602 | cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; |
333 | rspiocbq, (phba->fc_ratov * 2) | 603 | cmdiocbq->context1 = dd_data; |
334 | + LPFC_DRVR_TIMEOUT); | 604 | cmdiocbq->context2 = rspiocbq; |
335 | 605 | dd_data->type = TYPE_IOCB; | |
336 | /* release the new ndlp once the iocb completes */ | 606 | dd_data->context_un.iocb.cmdiocbq = cmdiocbq; |
337 | lpfc_nlp_put(ndlp); | 607 | dd_data->context_un.iocb.rspiocbq = rspiocbq; |
338 | if (iocb_status != IOCB_TIMEDOUT) { | 608 | dd_data->context_un.iocb.set_job = job; |
339 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 609 | dd_data->context_un.iocb.bmp = NULL;; |
340 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 610 | dd_data->context_un.iocb.ndlp = ndlp; |
341 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | 611 | |
342 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | 612 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
613 | creg_val = readl(phba->HCregaddr); | ||
614 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | ||
615 | writel(creg_val, phba->HCregaddr); | ||
616 | readl(phba->HCregaddr); /* flush */ | ||
343 | } | 617 | } |
618 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); | ||
619 | lpfc_nlp_put(ndlp); | ||
620 | if (rc == IOCB_SUCCESS) | ||
621 | return 0; /* done for now */ | ||
344 | 622 | ||
345 | if (iocb_status == IOCB_SUCCESS) { | 623 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
346 | if (rsp->ulpStatus == IOSTAT_SUCCESS) { | 624 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
347 | job->reply->reply_payload_rcv_len = | 625 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, |
348 | rsp->un.elsreq64.bdl.bdeSize; | 626 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); |
349 | rc = 0; | ||
350 | } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { | ||
351 | struct fc_bsg_ctels_reply *els_reply; | ||
352 | /* LS_RJT data returned in word 4 */ | ||
353 | uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; | ||
354 | |||
355 | els_reply = &job->reply->reply_data.ctels_reply; | ||
356 | job->reply->result = 0; | ||
357 | els_reply->status = FC_CTELS_STATUS_REJECT; | ||
358 | els_reply->rjt_data.action = rjt_data[0]; | ||
359 | els_reply->rjt_data.reason_code = rjt_data[1]; | ||
360 | els_reply->rjt_data.reason_explanation = rjt_data[2]; | ||
361 | els_reply->rjt_data.vendor_unique = rjt_data[3]; | ||
362 | } else | ||
363 | rc = -EIO; | ||
364 | } else | ||
365 | rc = -EIO; | ||
366 | 627 | ||
367 | if (iocb_status != IOCB_TIMEDOUT) | 628 | lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); |
368 | lpfc_els_free_iocb(phba, cmdiocbq); | 629 | |
630 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
369 | 631 | ||
632 | free_rspiocbq: | ||
370 | lpfc_sli_release_iocbq(phba, rspiocbq); | 633 | lpfc_sli_release_iocbq(phba, rspiocbq); |
371 | 634 | ||
372 | out: | 635 | free_dd_data: |
636 | kfree(dd_data); | ||
637 | |||
638 | no_dd_data: | ||
373 | /* make error code available to userspace */ | 639 | /* make error code available to userspace */ |
374 | job->reply->result = rc; | 640 | job->reply->result = rc; |
375 | /* complete the job back to userspace */ | 641 | job->dd_data = NULL; |
376 | job->job_done(job); | 642 | return rc; |
377 | |||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | struct lpfc_ct_event { | ||
382 | struct list_head node; | ||
383 | int ref; | ||
384 | wait_queue_head_t wq; | ||
385 | |||
386 | /* Event type and waiter identifiers */ | ||
387 | uint32_t type_mask; | ||
388 | uint32_t req_id; | ||
389 | uint32_t reg_id; | ||
390 | |||
391 | /* next two flags are here for the auto-delete logic */ | ||
392 | unsigned long wait_time_stamp; | ||
393 | int waiting; | ||
394 | |||
395 | /* seen and not seen events */ | ||
396 | struct list_head events_to_get; | ||
397 | struct list_head events_to_see; | ||
398 | }; | ||
399 | |||
400 | struct event_data { | ||
401 | struct list_head node; | ||
402 | uint32_t type; | ||
403 | uint32_t immed_dat; | ||
404 | void *data; | ||
405 | uint32_t len; | ||
406 | }; | ||
407 | |||
408 | static struct lpfc_ct_event * | ||
409 | lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id) | ||
410 | { | ||
411 | struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); | ||
412 | if (!evt) | ||
413 | return NULL; | ||
414 | |||
415 | INIT_LIST_HEAD(&evt->events_to_get); | ||
416 | INIT_LIST_HEAD(&evt->events_to_see); | ||
417 | evt->req_id = ev_req_id; | ||
418 | evt->reg_id = ev_reg_id; | ||
419 | evt->wait_time_stamp = jiffies; | ||
420 | init_waitqueue_head(&evt->wq); | ||
421 | |||
422 | return evt; | ||
423 | } | 643 | } |
424 | 644 | ||
645 | /** | ||
646 | * lpfc_bsg_event_free - frees an allocated event structure | ||
647 | * @kref: Pointer to a kref. | ||
648 | * | ||
649 | * Called from kref_put. Back cast the kref into an event structure address. | ||
650 | * Free any events to get, delete associated nodes, free any events to see, | ||
651 | * free any data then free the event itself. | ||
652 | **/ | ||
425 | static void | 653 | static void |
426 | lpfc_ct_event_free(struct lpfc_ct_event *evt) | 654 | lpfc_bsg_event_free(struct kref *kref) |
427 | { | 655 | { |
656 | struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event, | ||
657 | kref); | ||
428 | struct event_data *ed; | 658 | struct event_data *ed; |
429 | 659 | ||
430 | list_del(&evt->node); | 660 | list_del(&evt->node); |
@@ -446,25 +676,82 @@ lpfc_ct_event_free(struct lpfc_ct_event *evt) | |||
446 | kfree(evt); | 676 | kfree(evt); |
447 | } | 677 | } |
448 | 678 | ||
679 | /** | ||
680 | * lpfc_bsg_event_ref - increments the kref for an event | ||
681 | * @evt: Pointer to an event structure. | ||
682 | **/ | ||
449 | static inline void | 683 | static inline void |
450 | lpfc_ct_event_ref(struct lpfc_ct_event *evt) | 684 | lpfc_bsg_event_ref(struct lpfc_bsg_event *evt) |
451 | { | 685 | { |
452 | evt->ref++; | 686 | kref_get(&evt->kref); |
453 | } | 687 | } |
454 | 688 | ||
689 | /** | ||
690 | * lpfc_bsg_event_unref - Uses kref_put to free an event structure | ||
691 | * @evt: Pointer to an event structure. | ||
692 | **/ | ||
455 | static inline void | 693 | static inline void |
456 | lpfc_ct_event_unref(struct lpfc_ct_event *evt) | 694 | lpfc_bsg_event_unref(struct lpfc_bsg_event *evt) |
457 | { | 695 | { |
458 | if (--evt->ref < 0) | 696 | kref_put(&evt->kref, lpfc_bsg_event_free); |
459 | lpfc_ct_event_free(evt); | ||
460 | } | 697 | } |
461 | 698 | ||
462 | #define SLI_CT_ELX_LOOPBACK 0x10 | 699 | /** |
700 | * lpfc_bsg_event_new - allocate and initialize a event structure | ||
701 | * @ev_mask: Mask of events. | ||
702 | * @ev_reg_id: Event reg id. | ||
703 | * @ev_req_id: Event request id. | ||
704 | **/ | ||
705 | static struct lpfc_bsg_event * | ||
706 | lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id) | ||
707 | { | ||
708 | struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); | ||
463 | 709 | ||
464 | enum ELX_LOOPBACK_CMD { | 710 | if (!evt) |
465 | ELX_LOOPBACK_XRI_SETUP, | 711 | return NULL; |
466 | ELX_LOOPBACK_DATA, | 712 | |
467 | }; | 713 | INIT_LIST_HEAD(&evt->events_to_get); |
714 | INIT_LIST_HEAD(&evt->events_to_see); | ||
715 | evt->type_mask = ev_mask; | ||
716 | evt->req_id = ev_req_id; | ||
717 | evt->reg_id = ev_reg_id; | ||
718 | evt->wait_time_stamp = jiffies; | ||
719 | init_waitqueue_head(&evt->wq); | ||
720 | kref_init(&evt->kref); | ||
721 | return evt; | ||
722 | } | ||
723 | |||
724 | /** | ||
725 | * diag_cmd_data_free - Frees an lpfc dma buffer extension | ||
726 | * @phba: Pointer to HBA context object. | ||
727 | * @mlist: Pointer to an lpfc dma buffer extension. | ||
728 | **/ | ||
729 | static int | ||
730 | diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist) | ||
731 | { | ||
732 | struct lpfc_dmabufext *mlast; | ||
733 | struct pci_dev *pcidev; | ||
734 | struct list_head head, *curr, *next; | ||
735 | |||
736 | if ((!mlist) || (!lpfc_is_link_up(phba) && | ||
737 | (phba->link_flag & LS_LOOPBACK_MODE))) { | ||
738 | return 0; | ||
739 | } | ||
740 | |||
741 | pcidev = phba->pcidev; | ||
742 | list_add_tail(&head, &mlist->dma.list); | ||
743 | |||
744 | list_for_each_safe(curr, next, &head) { | ||
745 | mlast = list_entry(curr, struct lpfc_dmabufext , dma.list); | ||
746 | if (mlast->dma.virt) | ||
747 | dma_free_coherent(&pcidev->dev, | ||
748 | mlast->size, | ||
749 | mlast->dma.virt, | ||
750 | mlast->dma.phys); | ||
751 | kfree(mlast); | ||
752 | } | ||
753 | return 0; | ||
754 | } | ||
468 | 755 | ||
469 | /** | 756 | /** |
470 | * lpfc_bsg_ct_unsol_event - process an unsolicited CT command | 757 | * lpfc_bsg_ct_unsol_event - process an unsolicited CT command |
@@ -473,9 +760,9 @@ enum ELX_LOOPBACK_CMD { | |||
473 | * @piocbq: | 760 | * @piocbq: |
474 | * | 761 | * |
475 | * This function is called when an unsolicited CT command is received. It | 762 | * This function is called when an unsolicited CT command is received. It |
476 | * forwards the event to any processes registerd to receive CT events. | 763 | * forwards the event to any processes registered to receive CT events. |
477 | */ | 764 | **/ |
478 | void | 765 | int |
479 | lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 766 | lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
480 | struct lpfc_iocbq *piocbq) | 767 | struct lpfc_iocbq *piocbq) |
481 | { | 768 | { |
@@ -483,7 +770,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
483 | uint32_t cmd; | 770 | uint32_t cmd; |
484 | uint32_t len; | 771 | uint32_t len; |
485 | struct lpfc_dmabuf *dmabuf = NULL; | 772 | struct lpfc_dmabuf *dmabuf = NULL; |
486 | struct lpfc_ct_event *evt; | 773 | struct lpfc_bsg_event *evt; |
487 | struct event_data *evt_dat = NULL; | 774 | struct event_data *evt_dat = NULL; |
488 | struct lpfc_iocbq *iocbq; | 775 | struct lpfc_iocbq *iocbq; |
489 | size_t offset = 0; | 776 | size_t offset = 0; |
@@ -495,6 +782,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
495 | struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; | 782 | struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; |
496 | struct lpfc_hbq_entry *hbqe; | 783 | struct lpfc_hbq_entry *hbqe; |
497 | struct lpfc_sli_ct_request *ct_req; | 784 | struct lpfc_sli_ct_request *ct_req; |
785 | struct fc_bsg_job *job = NULL; | ||
786 | unsigned long flags; | ||
787 | int size = 0; | ||
498 | 788 | ||
499 | INIT_LIST_HEAD(&head); | 789 | INIT_LIST_HEAD(&head); |
500 | list_add_tail(&head, &piocbq->list); | 790 | list_add_tail(&head, &piocbq->list); |
@@ -503,6 +793,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
503 | piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) | 793 | piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) |
504 | goto error_ct_unsol_exit; | 794 | goto error_ct_unsol_exit; |
505 | 795 | ||
796 | if (phba->link_state == LPFC_HBA_ERROR || | ||
797 | (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) | ||
798 | goto error_ct_unsol_exit; | ||
799 | |||
506 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) | 800 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) |
507 | dmabuf = bdeBuf1; | 801 | dmabuf = bdeBuf1; |
508 | else { | 802 | else { |
@@ -510,7 +804,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
510 | piocbq->iocb.un.cont64[0].addrLow); | 804 | piocbq->iocb.un.cont64[0].addrLow); |
511 | dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); | 805 | dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); |
512 | } | 806 | } |
513 | 807 | if (dmabuf == NULL) | |
808 | goto error_ct_unsol_exit; | ||
514 | ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; | 809 | ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; |
515 | evt_req_id = ct_req->FsType; | 810 | evt_req_id = ct_req->FsType; |
516 | cmd = ct_req->CommandResponse.bits.CmdRsp; | 811 | cmd = ct_req->CommandResponse.bits.CmdRsp; |
@@ -518,24 +813,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
518 | if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) | 813 | if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) |
519 | lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); | 814 | lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); |
520 | 815 | ||
521 | mutex_lock(&phba->ct_event_mutex); | 816 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
522 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { | 817 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { |
523 | if (evt->req_id != evt_req_id) | 818 | if (!(evt->type_mask & FC_REG_CT_EVENT) || |
819 | evt->req_id != evt_req_id) | ||
524 | continue; | 820 | continue; |
525 | 821 | ||
526 | lpfc_ct_event_ref(evt); | 822 | lpfc_bsg_event_ref(evt); |
527 | 823 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | |
528 | evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); | 824 | evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); |
529 | if (!evt_dat) { | 825 | if (evt_dat == NULL) { |
530 | lpfc_ct_event_unref(evt); | 826 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
827 | lpfc_bsg_event_unref(evt); | ||
531 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 828 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
532 | "2614 Memory allocation failed for " | 829 | "2614 Memory allocation failed for " |
533 | "CT event\n"); | 830 | "CT event\n"); |
534 | break; | 831 | break; |
535 | } | 832 | } |
536 | 833 | ||
537 | mutex_unlock(&phba->ct_event_mutex); | ||
538 | |||
539 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 834 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
540 | /* take accumulated byte count from the last iocbq */ | 835 | /* take accumulated byte count from the last iocbq */ |
541 | iocbq = list_entry(head.prev, typeof(*iocbq), list); | 836 | iocbq = list_entry(head.prev, typeof(*iocbq), list); |
@@ -549,25 +844,25 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
549 | } | 844 | } |
550 | 845 | ||
551 | evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); | 846 | evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); |
552 | if (!evt_dat->data) { | 847 | if (evt_dat->data == NULL) { |
553 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 848 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
554 | "2615 Memory allocation failed for " | 849 | "2615 Memory allocation failed for " |
555 | "CT event data, size %d\n", | 850 | "CT event data, size %d\n", |
556 | evt_dat->len); | 851 | evt_dat->len); |
557 | kfree(evt_dat); | 852 | kfree(evt_dat); |
558 | mutex_lock(&phba->ct_event_mutex); | 853 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
559 | lpfc_ct_event_unref(evt); | 854 | lpfc_bsg_event_unref(evt); |
560 | mutex_unlock(&phba->ct_event_mutex); | 855 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
561 | goto error_ct_unsol_exit; | 856 | goto error_ct_unsol_exit; |
562 | } | 857 | } |
563 | 858 | ||
564 | list_for_each_entry(iocbq, &head, list) { | 859 | list_for_each_entry(iocbq, &head, list) { |
860 | size = 0; | ||
565 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | 861 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { |
566 | bdeBuf1 = iocbq->context2; | 862 | bdeBuf1 = iocbq->context2; |
567 | bdeBuf2 = iocbq->context3; | 863 | bdeBuf2 = iocbq->context3; |
568 | } | 864 | } |
569 | for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { | 865 | for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { |
570 | int size = 0; | ||
571 | if (phba->sli3_options & | 866 | if (phba->sli3_options & |
572 | LPFC_SLI3_HBQ_ENABLED) { | 867 | LPFC_SLI3_HBQ_ENABLED) { |
573 | if (i == 0) { | 868 | if (i == 0) { |
@@ -600,9 +895,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
600 | iocbq); | 895 | iocbq); |
601 | kfree(evt_dat->data); | 896 | kfree(evt_dat->data); |
602 | kfree(evt_dat); | 897 | kfree(evt_dat); |
603 | mutex_lock(&phba->ct_event_mutex); | 898 | spin_lock_irqsave(&phba->ct_ev_lock, |
604 | lpfc_ct_event_unref(evt); | 899 | flags); |
605 | mutex_unlock(&phba->ct_event_mutex); | 900 | lpfc_bsg_event_unref(evt); |
901 | spin_unlock_irqrestore( | ||
902 | &phba->ct_ev_lock, flags); | ||
606 | goto error_ct_unsol_exit; | 903 | goto error_ct_unsol_exit; |
607 | } | 904 | } |
608 | memcpy((char *)(evt_dat->data) + offset, | 905 | memcpy((char *)(evt_dat->data) + offset, |
@@ -615,15 +912,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
615 | dmabuf); | 912 | dmabuf); |
616 | } else { | 913 | } else { |
617 | switch (cmd) { | 914 | switch (cmd) { |
915 | case ELX_LOOPBACK_DATA: | ||
916 | diag_cmd_data_free(phba, | ||
917 | (struct lpfc_dmabufext *) | ||
918 | dmabuf); | ||
919 | break; | ||
618 | case ELX_LOOPBACK_XRI_SETUP: | 920 | case ELX_LOOPBACK_XRI_SETUP: |
619 | if (!(phba->sli3_options & | 921 | if ((phba->sli_rev == |
620 | LPFC_SLI3_HBQ_ENABLED)) | 922 | LPFC_SLI_REV2) || |
923 | (phba->sli3_options & | ||
924 | LPFC_SLI3_HBQ_ENABLED | ||
925 | )) { | ||
926 | lpfc_in_buf_free(phba, | ||
927 | dmabuf); | ||
928 | } else { | ||
621 | lpfc_post_buffer(phba, | 929 | lpfc_post_buffer(phba, |
622 | pring, | 930 | pring, |
623 | 1); | 931 | 1); |
624 | else | 932 | } |
625 | lpfc_in_buf_free(phba, | ||
626 | dmabuf); | ||
627 | break; | 933 | break; |
628 | default: | 934 | default: |
629 | if (!(phba->sli3_options & | 935 | if (!(phba->sli3_options & |
@@ -637,7 +943,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
637 | } | 943 | } |
638 | } | 944 | } |
639 | 945 | ||
640 | mutex_lock(&phba->ct_event_mutex); | 946 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
641 | if (phba->sli_rev == LPFC_SLI_REV4) { | 947 | if (phba->sli_rev == LPFC_SLI_REV4) { |
642 | evt_dat->immed_dat = phba->ctx_idx; | 948 | evt_dat->immed_dat = phba->ctx_idx; |
643 | phba->ctx_idx = (phba->ctx_idx + 1) % 64; | 949 | phba->ctx_idx = (phba->ctx_idx + 1) % 64; |
@@ -650,122 +956,144 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
650 | 956 | ||
651 | evt_dat->type = FC_REG_CT_EVENT; | 957 | evt_dat->type = FC_REG_CT_EVENT; |
652 | list_add(&evt_dat->node, &evt->events_to_see); | 958 | list_add(&evt_dat->node, &evt->events_to_see); |
653 | wake_up_interruptible(&evt->wq); | 959 | if (evt_req_id == SLI_CT_ELX_LOOPBACK) { |
654 | lpfc_ct_event_unref(evt); | 960 | wake_up_interruptible(&evt->wq); |
655 | if (evt_req_id == SLI_CT_ELX_LOOPBACK) | 961 | lpfc_bsg_event_unref(evt); |
656 | break; | 962 | break; |
963 | } | ||
964 | |||
965 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
966 | lpfc_bsg_event_unref(evt); | ||
967 | |||
968 | job = evt->set_job; | ||
969 | evt->set_job = NULL; | ||
970 | if (job) { | ||
971 | job->reply->reply_payload_rcv_len = size; | ||
972 | /* make error code available to userspace */ | ||
973 | job->reply->result = 0; | ||
974 | job->dd_data = NULL; | ||
975 | /* complete the job back to userspace */ | ||
976 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
977 | job->job_done(job); | ||
978 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
979 | } | ||
657 | } | 980 | } |
658 | mutex_unlock(&phba->ct_event_mutex); | 981 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
659 | 982 | ||
660 | error_ct_unsol_exit: | 983 | error_ct_unsol_exit: |
661 | if (!list_empty(&head)) | 984 | if (!list_empty(&head)) |
662 | list_del(&head); | 985 | list_del(&head); |
663 | 986 | if (evt_req_id == SLI_CT_ELX_LOOPBACK) | |
664 | return; | 987 | return 0; |
988 | return 1; | ||
665 | } | 989 | } |
666 | 990 | ||
667 | /** | 991 | /** |
668 | * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command | 992 | * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command |
669 | * @job: SET_EVENT fc_bsg_job | 993 | * @job: SET_EVENT fc_bsg_job |
670 | */ | 994 | **/ |
671 | static int | 995 | static int |
672 | lpfc_bsg_set_event(struct fc_bsg_job *job) | 996 | lpfc_bsg_hba_set_event(struct fc_bsg_job *job) |
673 | { | 997 | { |
674 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | 998 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; |
675 | struct lpfc_hba *phba = vport->phba; | 999 | struct lpfc_hba *phba = vport->phba; |
676 | struct set_ct_event *event_req; | 1000 | struct set_ct_event *event_req; |
677 | struct lpfc_ct_event *evt; | 1001 | struct lpfc_bsg_event *evt; |
678 | int rc = 0; | 1002 | int rc = 0; |
1003 | struct bsg_job_data *dd_data = NULL; | ||
1004 | uint32_t ev_mask; | ||
1005 | unsigned long flags; | ||
679 | 1006 | ||
680 | if (job->request_len < | 1007 | if (job->request_len < |
681 | sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { | 1008 | sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { |
682 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 1009 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
683 | "2612 Received SET_CT_EVENT below minimum " | 1010 | "2612 Received SET_CT_EVENT below minimum " |
684 | "size\n"); | 1011 | "size\n"); |
685 | return -EINVAL; | 1012 | rc = -EINVAL; |
1013 | goto job_error; | ||
1014 | } | ||
1015 | |||
1016 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
1017 | if (dd_data == NULL) { | ||
1018 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
1019 | "2734 Failed allocation of dd_data\n"); | ||
1020 | rc = -ENOMEM; | ||
1021 | goto job_error; | ||
686 | } | 1022 | } |
687 | 1023 | ||
688 | event_req = (struct set_ct_event *) | 1024 | event_req = (struct set_ct_event *) |
689 | job->request->rqst_data.h_vendor.vendor_cmd; | 1025 | job->request->rqst_data.h_vendor.vendor_cmd; |
690 | 1026 | ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & | |
691 | mutex_lock(&phba->ct_event_mutex); | 1027 | FC_REG_EVENT_MASK); |
1028 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
692 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { | 1029 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { |
693 | if (evt->reg_id == event_req->ev_reg_id) { | 1030 | if (evt->reg_id == event_req->ev_reg_id) { |
694 | lpfc_ct_event_ref(evt); | 1031 | lpfc_bsg_event_ref(evt); |
695 | evt->wait_time_stamp = jiffies; | 1032 | evt->wait_time_stamp = jiffies; |
696 | break; | 1033 | break; |
697 | } | 1034 | } |
698 | } | 1035 | } |
699 | mutex_unlock(&phba->ct_event_mutex); | 1036 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
700 | 1037 | ||
701 | if (&evt->node == &phba->ct_ev_waiters) { | 1038 | if (&evt->node == &phba->ct_ev_waiters) { |
702 | /* no event waiting struct yet - first call */ | 1039 | /* no event waiting struct yet - first call */ |
703 | evt = lpfc_ct_event_new(event_req->ev_reg_id, | 1040 | evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, |
704 | event_req->ev_req_id); | 1041 | event_req->ev_req_id); |
705 | if (!evt) { | 1042 | if (!evt) { |
706 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 1043 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
707 | "2617 Failed allocation of event " | 1044 | "2617 Failed allocation of event " |
708 | "waiter\n"); | 1045 | "waiter\n"); |
709 | return -ENOMEM; | 1046 | rc = -ENOMEM; |
1047 | goto job_error; | ||
710 | } | 1048 | } |
711 | 1049 | ||
712 | mutex_lock(&phba->ct_event_mutex); | 1050 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
713 | list_add(&evt->node, &phba->ct_ev_waiters); | 1051 | list_add(&evt->node, &phba->ct_ev_waiters); |
714 | lpfc_ct_event_ref(evt); | 1052 | lpfc_bsg_event_ref(evt); |
715 | mutex_unlock(&phba->ct_event_mutex); | 1053 | evt->wait_time_stamp = jiffies; |
1054 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
716 | } | 1055 | } |
717 | 1056 | ||
1057 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
718 | evt->waiting = 1; | 1058 | evt->waiting = 1; |
719 | if (wait_event_interruptible(evt->wq, | 1059 | dd_data->type = TYPE_EVT; |
720 | !list_empty(&evt->events_to_see))) { | 1060 | dd_data->context_un.evt = evt; |
721 | mutex_lock(&phba->ct_event_mutex); | 1061 | evt->set_job = job; /* for unsolicited command */ |
722 | lpfc_ct_event_unref(evt); /* release ref */ | 1062 | job->dd_data = dd_data; /* for fc transport timeout callback*/ |
723 | lpfc_ct_event_unref(evt); /* delete */ | 1063 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
724 | mutex_unlock(&phba->ct_event_mutex); | 1064 | return 0; /* call job done later */ |
725 | rc = -EINTR; | 1065 | |
726 | goto set_event_out; | 1066 | job_error: |
727 | } | 1067 | if (dd_data != NULL) |
728 | 1068 | kfree(dd_data); | |
729 | evt->wait_time_stamp = jiffies; | 1069 | |
730 | evt->waiting = 0; | 1070 | job->dd_data = NULL; |
731 | 1071 | return rc; | |
732 | mutex_lock(&phba->ct_event_mutex); | ||
733 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
734 | lpfc_ct_event_unref(evt); /* release ref */ | ||
735 | mutex_unlock(&phba->ct_event_mutex); | ||
736 | |||
737 | set_event_out: | ||
738 | /* set_event carries no reply payload */ | ||
739 | job->reply->reply_payload_rcv_len = 0; | ||
740 | /* make error code available to userspace */ | ||
741 | job->reply->result = rc; | ||
742 | /* complete the job back to userspace */ | ||
743 | job->job_done(job); | ||
744 | |||
745 | return 0; | ||
746 | } | 1072 | } |
747 | 1073 | ||
748 | /** | 1074 | /** |
749 | * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command | 1075 | * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command |
750 | * @job: GET_EVENT fc_bsg_job | 1076 | * @job: GET_EVENT fc_bsg_job |
751 | */ | 1077 | **/ |
752 | static int | 1078 | static int |
753 | lpfc_bsg_get_event(struct fc_bsg_job *job) | 1079 | lpfc_bsg_hba_get_event(struct fc_bsg_job *job) |
754 | { | 1080 | { |
755 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | 1081 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; |
756 | struct lpfc_hba *phba = vport->phba; | 1082 | struct lpfc_hba *phba = vport->phba; |
757 | struct get_ct_event *event_req; | 1083 | struct get_ct_event *event_req; |
758 | struct get_ct_event_reply *event_reply; | 1084 | struct get_ct_event_reply *event_reply; |
759 | struct lpfc_ct_event *evt; | 1085 | struct lpfc_bsg_event *evt; |
760 | struct event_data *evt_dat = NULL; | 1086 | struct event_data *evt_dat = NULL; |
761 | int rc = 0; | 1087 | unsigned long flags; |
1088 | uint32_t rc = 0; | ||
762 | 1089 | ||
763 | if (job->request_len < | 1090 | if (job->request_len < |
764 | sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { | 1091 | sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { |
765 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 1092 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
766 | "2613 Received GET_CT_EVENT request below " | 1093 | "2613 Received GET_CT_EVENT request below " |
767 | "minimum size\n"); | 1094 | "minimum size\n"); |
768 | return -EINVAL; | 1095 | rc = -EINVAL; |
1096 | goto job_error; | ||
769 | } | 1097 | } |
770 | 1098 | ||
771 | event_req = (struct get_ct_event *) | 1099 | event_req = (struct get_ct_event *) |
@@ -773,13 +1101,12 @@ lpfc_bsg_get_event(struct fc_bsg_job *job) | |||
773 | 1101 | ||
774 | event_reply = (struct get_ct_event_reply *) | 1102 | event_reply = (struct get_ct_event_reply *) |
775 | job->reply->reply_data.vendor_reply.vendor_rsp; | 1103 | job->reply->reply_data.vendor_reply.vendor_rsp; |
776 | 1104 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | |
777 | mutex_lock(&phba->ct_event_mutex); | ||
778 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { | 1105 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { |
779 | if (evt->reg_id == event_req->ev_reg_id) { | 1106 | if (evt->reg_id == event_req->ev_reg_id) { |
780 | if (list_empty(&evt->events_to_get)) | 1107 | if (list_empty(&evt->events_to_get)) |
781 | break; | 1108 | break; |
782 | lpfc_ct_event_ref(evt); | 1109 | lpfc_bsg_event_ref(evt); |
783 | evt->wait_time_stamp = jiffies; | 1110 | evt->wait_time_stamp = jiffies; |
784 | evt_dat = list_entry(evt->events_to_get.prev, | 1111 | evt_dat = list_entry(evt->events_to_get.prev, |
785 | struct event_data, node); | 1112 | struct event_data, node); |
@@ -787,84 +1114,1904 @@ lpfc_bsg_get_event(struct fc_bsg_job *job) | |||
787 | break; | 1114 | break; |
788 | } | 1115 | } |
789 | } | 1116 | } |
790 | mutex_unlock(&phba->ct_event_mutex); | 1117 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
791 | 1118 | ||
792 | if (!evt_dat) { | 1119 | /* The app may continue to ask for event data until it gets |
1120 | * an error indicating that there isn't anymore | ||
1121 | */ | ||
1122 | if (evt_dat == NULL) { | ||
793 | job->reply->reply_payload_rcv_len = 0; | 1123 | job->reply->reply_payload_rcv_len = 0; |
794 | rc = -ENOENT; | 1124 | rc = -ENOENT; |
795 | goto error_get_event_exit; | 1125 | goto job_error; |
796 | } | 1126 | } |
797 | 1127 | ||
798 | if (evt_dat->len > job->reply_payload.payload_len) { | 1128 | if (evt_dat->len > job->request_payload.payload_len) { |
799 | evt_dat->len = job->reply_payload.payload_len; | 1129 | evt_dat->len = job->request_payload.payload_len; |
800 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | 1130 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, |
801 | "2618 Truncated event data at %d " | 1131 | "2618 Truncated event data at %d " |
802 | "bytes\n", | 1132 | "bytes\n", |
803 | job->reply_payload.payload_len); | 1133 | job->request_payload.payload_len); |
804 | } | 1134 | } |
805 | 1135 | ||
1136 | event_reply->type = evt_dat->type; | ||
806 | event_reply->immed_data = evt_dat->immed_dat; | 1137 | event_reply->immed_data = evt_dat->immed_dat; |
807 | |||
808 | if (evt_dat->len > 0) | 1138 | if (evt_dat->len > 0) |
809 | job->reply->reply_payload_rcv_len = | 1139 | job->reply->reply_payload_rcv_len = |
810 | sg_copy_from_buffer(job->reply_payload.sg_list, | 1140 | sg_copy_from_buffer(job->request_payload.sg_list, |
811 | job->reply_payload.sg_cnt, | 1141 | job->request_payload.sg_cnt, |
812 | evt_dat->data, evt_dat->len); | 1142 | evt_dat->data, evt_dat->len); |
813 | else | 1143 | else |
814 | job->reply->reply_payload_rcv_len = 0; | 1144 | job->reply->reply_payload_rcv_len = 0; |
815 | rc = 0; | ||
816 | 1145 | ||
817 | if (evt_dat) | 1146 | if (evt_dat) { |
818 | kfree(evt_dat->data); | 1147 | kfree(evt_dat->data); |
819 | kfree(evt_dat); | 1148 | kfree(evt_dat); |
820 | mutex_lock(&phba->ct_event_mutex); | 1149 | } |
821 | lpfc_ct_event_unref(evt); | 1150 | |
822 | mutex_unlock(&phba->ct_event_mutex); | 1151 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
1152 | lpfc_bsg_event_unref(evt); | ||
1153 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1154 | job->dd_data = NULL; | ||
1155 | job->reply->result = 0; | ||
1156 | job->job_done(job); | ||
1157 | return 0; | ||
1158 | |||
1159 | job_error: | ||
1160 | job->dd_data = NULL; | ||
1161 | job->reply->result = rc; | ||
1162 | return rc; | ||
1163 | } | ||
1164 | |||
1165 | /** | ||
1166 | * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler | ||
1167 | * @phba: Pointer to HBA context object. | ||
1168 | * @cmdiocbq: Pointer to command iocb. | ||
1169 | * @rspiocbq: Pointer to response iocb. | ||
1170 | * | ||
1171 | * This function is the completion handler for iocbs issued using | ||
1172 | * lpfc_issue_ct_rsp_cmp function. This function is called by the | ||
1173 | * ring event handler function without any lock held. This function | ||
1174 | * can be called from both worker thread context and interrupt | ||
1175 | * context. This function also can be called from other thread which | ||
1176 | * cleans up the SLI layer objects. | ||
1177 | * This function copy the contents of the response iocb to the | ||
1178 | * response iocb memory object provided by the caller of | ||
1179 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
1180 | * sleeps for the iocb completion. | ||
1181 | **/ | ||
1182 | static void | ||
1183 | lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, | ||
1184 | struct lpfc_iocbq *cmdiocbq, | ||
1185 | struct lpfc_iocbq *rspiocbq) | ||
1186 | { | ||
1187 | struct bsg_job_data *dd_data; | ||
1188 | struct fc_bsg_job *job; | ||
1189 | IOCB_t *rsp; | ||
1190 | struct lpfc_dmabuf *bmp; | ||
1191 | struct lpfc_nodelist *ndlp; | ||
1192 | unsigned long flags; | ||
1193 | int rc = 0; | ||
823 | 1194 | ||
824 | error_get_event_exit: | 1195 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
1196 | dd_data = cmdiocbq->context1; | ||
1197 | /* normal completion and timeout crossed paths, already done */ | ||
1198 | if (!dd_data) { | ||
1199 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1200 | return; | ||
1201 | } | ||
1202 | |||
1203 | job = dd_data->context_un.iocb.set_job; | ||
1204 | bmp = dd_data->context_un.iocb.bmp; | ||
1205 | rsp = &rspiocbq->iocb; | ||
1206 | ndlp = dd_data->context_un.iocb.ndlp; | ||
1207 | |||
1208 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
1209 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
1210 | |||
1211 | if (rsp->ulpStatus) { | ||
1212 | if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | ||
1213 | switch (rsp->un.ulpWord[4] & 0xff) { | ||
1214 | case IOERR_SEQUENCE_TIMEOUT: | ||
1215 | rc = -ETIMEDOUT; | ||
1216 | break; | ||
1217 | case IOERR_INVALID_RPI: | ||
1218 | rc = -EFAULT; | ||
1219 | break; | ||
1220 | default: | ||
1221 | rc = -EACCES; | ||
1222 | break; | ||
1223 | } | ||
1224 | } else | ||
1225 | rc = -EACCES; | ||
1226 | } else | ||
1227 | job->reply->reply_payload_rcv_len = | ||
1228 | rsp->un.genreq64.bdl.bdeSize; | ||
1229 | |||
1230 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | ||
1231 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
1232 | lpfc_nlp_put(ndlp); | ||
1233 | kfree(bmp); | ||
1234 | kfree(dd_data); | ||
825 | /* make error code available to userspace */ | 1235 | /* make error code available to userspace */ |
826 | job->reply->result = rc; | 1236 | job->reply->result = rc; |
1237 | job->dd_data = NULL; | ||
827 | /* complete the job back to userspace */ | 1238 | /* complete the job back to userspace */ |
828 | job->job_done(job); | 1239 | job->job_done(job); |
1240 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1241 | return; | ||
1242 | } | ||
829 | 1243 | ||
1244 | /** | ||
1245 | * lpfc_issue_ct_rsp - issue a ct response | ||
1246 | * @phba: Pointer to HBA context object. | ||
1247 | * @job: Pointer to the job object. | ||
1248 | * @tag: tag index value into the ports context exchange array. | ||
1249 | * @bmp: Pointer to a dma buffer descriptor. | ||
1250 | * @num_entry: Number of enties in the bde. | ||
1251 | **/ | ||
1252 | static int | ||
1253 | lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | ||
1254 | struct lpfc_dmabuf *bmp, int num_entry) | ||
1255 | { | ||
1256 | IOCB_t *icmd; | ||
1257 | struct lpfc_iocbq *ctiocb = NULL; | ||
1258 | int rc = 0; | ||
1259 | struct lpfc_nodelist *ndlp = NULL; | ||
1260 | struct bsg_job_data *dd_data; | ||
1261 | uint32_t creg_val; | ||
1262 | |||
1263 | /* allocate our bsg tracking structure */ | ||
1264 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
1265 | if (!dd_data) { | ||
1266 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
1267 | "2736 Failed allocation of dd_data\n"); | ||
1268 | rc = -ENOMEM; | ||
1269 | goto no_dd_data; | ||
1270 | } | ||
1271 | |||
1272 | /* Allocate buffer for command iocb */ | ||
1273 | ctiocb = lpfc_sli_get_iocbq(phba); | ||
1274 | if (!ctiocb) { | ||
1275 | rc = ENOMEM; | ||
1276 | goto no_ctiocb; | ||
1277 | } | ||
1278 | |||
1279 | icmd = &ctiocb->iocb; | ||
1280 | icmd->un.xseq64.bdl.ulpIoTag32 = 0; | ||
1281 | icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); | ||
1282 | icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); | ||
1283 | icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
1284 | icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); | ||
1285 | icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); | ||
1286 | icmd->un.xseq64.w5.hcsw.Dfctl = 0; | ||
1287 | icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL; | ||
1288 | icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; | ||
1289 | |||
1290 | /* Fill in rest of iocb */ | ||
1291 | icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; | ||
1292 | icmd->ulpBdeCount = 1; | ||
1293 | icmd->ulpLe = 1; | ||
1294 | icmd->ulpClass = CLASS3; | ||
1295 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
1296 | /* Do not issue unsol response if oxid not marked as valid */ | ||
1297 | if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) { | ||
1298 | rc = IOCB_ERROR; | ||
1299 | goto issue_ct_rsp_exit; | ||
1300 | } | ||
1301 | icmd->ulpContext = phba->ct_ctx[tag].oxid; | ||
1302 | ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); | ||
1303 | if (!ndlp) { | ||
1304 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, | ||
1305 | "2721 ndlp null for oxid %x SID %x\n", | ||
1306 | icmd->ulpContext, | ||
1307 | phba->ct_ctx[tag].SID); | ||
1308 | rc = IOCB_ERROR; | ||
1309 | goto issue_ct_rsp_exit; | ||
1310 | } | ||
1311 | icmd->un.ulpWord[3] = ndlp->nlp_rpi; | ||
1312 | /* The exchange is done, mark the entry as invalid */ | ||
1313 | phba->ct_ctx[tag].flags &= ~UNSOL_VALID; | ||
1314 | } else | ||
1315 | icmd->ulpContext = (ushort) tag; | ||
1316 | |||
1317 | icmd->ulpTimeout = phba->fc_ratov * 2; | ||
1318 | |||
1319 | /* Xmit CT response on exchange <xid> */ | ||
1320 | lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | ||
1321 | "2722 Xmit CT response on exchange x%x Data: x%x x%x\n", | ||
1322 | icmd->ulpContext, icmd->ulpIoTag, phba->link_state); | ||
1323 | |||
1324 | ctiocb->iocb_cmpl = NULL; | ||
1325 | ctiocb->iocb_flag |= LPFC_IO_LIBDFC; | ||
1326 | ctiocb->vport = phba->pport; | ||
1327 | ctiocb->context3 = bmp; | ||
1328 | |||
1329 | ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; | ||
1330 | ctiocb->context1 = dd_data; | ||
1331 | ctiocb->context2 = NULL; | ||
1332 | dd_data->type = TYPE_IOCB; | ||
1333 | dd_data->context_un.iocb.cmdiocbq = ctiocb; | ||
1334 | dd_data->context_un.iocb.rspiocbq = NULL; | ||
1335 | dd_data->context_un.iocb.set_job = job; | ||
1336 | dd_data->context_un.iocb.bmp = bmp; | ||
1337 | dd_data->context_un.iocb.ndlp = ndlp; | ||
1338 | |||
1339 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | ||
1340 | creg_val = readl(phba->HCregaddr); | ||
1341 | creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); | ||
1342 | writel(creg_val, phba->HCregaddr); | ||
1343 | readl(phba->HCregaddr); /* flush */ | ||
1344 | } | ||
1345 | |||
1346 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | ||
1347 | |||
1348 | if (rc == IOCB_SUCCESS) | ||
1349 | return 0; /* done for now */ | ||
1350 | |||
1351 | issue_ct_rsp_exit: | ||
1352 | lpfc_sli_release_iocbq(phba, ctiocb); | ||
1353 | no_ctiocb: | ||
1354 | kfree(dd_data); | ||
1355 | no_dd_data: | ||
830 | return rc; | 1356 | return rc; |
831 | } | 1357 | } |
832 | 1358 | ||
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 *
 * Builds a buffer pointer list (BPL) describing the job's request payload
 * and hands it to lpfc_issue_ct_rsp() so it can be transmitted as the
 * response to the unsolicited CT command identified by the user's tag.
 *
 * Return: 0 if the response iocb was issued (the iocb completion handler
 * finishes the bsg job later), negative errno otherwise.
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	int request_nseg;
	int numbde;
	dma_addr_t busaddr;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* reject an empty payload or one above the 320K (80 * 4K) limit */
	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	/* bmp holds the BPL page that describes the payload s/g segments */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	/* DMA-map the user payload and translate each segment into a BDE */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* TBD need to handle a timeout */
	/* issue failed: undo the payload mapping and free the BPL buffer */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	rc = -EACCES;
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

send_mgmt_rsp_free_bmp:
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
1434 | |||
/**
 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing a port into diagnostic loopback
 * mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_bsg_diag_mode(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_set *loopback_mode;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	uint32_t link_flags;
	uint32_t timeout;
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	/*
	 * NOTE(review): timeout is consumed below as a count of ~10ms
	 * polling iterations, not seconds — confirm the units against the
	 * user-space tool that issues this command.
	 */
	timeout = loopback_mode->timeout;

	/* refuse while the HBA is errored, SLI is down, or mgmt I/O blocked */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto job_error;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* block new scsi requests on every vport (or just the physical port) */
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}

		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	/* let outstanding FCP commands drain from the txcmpl queue */
	while (pring->txcmplq_cnt) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;

		msleep(10);
	}

	/* bring the link down before switching it into loopback */
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}

			msleep(10);
		}

		/* re-init the link with the requested loopback topology */
		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			phba->link_flag |= LS_LOOPBACK_MODE;
			/* wait for the link attention interrupt */
			msleep(100);

			/* poll until the HBA reports ready again */
			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	/* re-enable scsi requests so the mid-layer doesn't offline the port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
1593 | |||
1594 | /** | ||
1595 | * lpfcdiag_loop_self_reg - obtains a remote port login id | ||
1596 | * @phba: Pointer to HBA context object | ||
1597 | * @rpi: Pointer to a remote port login id | ||
1598 | * | ||
1599 | * This function obtains a remote port login id so the diag loopback test | ||
1600 | * can send and receive its own unsolicited CT command. | ||
1601 | **/ | ||
1602 | static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) | ||
1603 | { | ||
1604 | LPFC_MBOXQ_t *mbox; | ||
1605 | struct lpfc_dmabuf *dmabuff; | ||
1606 | int status; | ||
1607 | |||
1608 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1609 | if (!mbox) | ||
1610 | return ENOMEM; | ||
1611 | |||
1612 | status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, | ||
1613 | (uint8_t *)&phba->pport->fc_sparam, mbox, 0); | ||
1614 | if (status) { | ||
1615 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1616 | return ENOMEM; | ||
1617 | } | ||
1618 | |||
1619 | dmabuff = (struct lpfc_dmabuf *) mbox->context1; | ||
1620 | mbox->context1 = NULL; | ||
1621 | status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); | ||
1622 | |||
1623 | if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { | ||
1624 | lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); | ||
1625 | kfree(dmabuff); | ||
1626 | if (status != MBX_TIMEOUT) | ||
1627 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1628 | return ENODEV; | ||
1629 | } | ||
1630 | |||
1631 | *rpi = mbox->u.mb.un.varWords[0]; | ||
1632 | |||
1633 | lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); | ||
1634 | kfree(dmabuff); | ||
1635 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1636 | return 0; | ||
1637 | } | ||
1638 | |||
1639 | /** | ||
1640 | * lpfcdiag_loop_self_unreg - unregs from the rpi | ||
1641 | * @phba: Pointer to HBA context object | ||
1642 | * @rpi: Remote port login id | ||
1643 | * | ||
1644 | * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg | ||
1645 | **/ | ||
1646 | static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) | ||
1647 | { | ||
1648 | LPFC_MBOXQ_t *mbox; | ||
1649 | int status; | ||
1650 | |||
1651 | /* Allocate mboxq structure */ | ||
1652 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
1653 | if (mbox == NULL) | ||
1654 | return ENOMEM; | ||
1655 | |||
1656 | lpfc_unreg_login(phba, 0, rpi, mbox); | ||
1657 | status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); | ||
1658 | |||
1659 | if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { | ||
1660 | if (status != MBX_TIMEOUT) | ||
1661 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1662 | return EIO; | ||
1663 | } | ||
1664 | |||
1665 | mempool_free(mbox, phba->mbox_mem_pool); | ||
1666 | return 0; | ||
1667 | } | ||
1668 | |||
1669 | /** | ||
1670 | * lpfcdiag_loop_get_xri - obtains the transmit and receive ids | ||
1671 | * @phba: Pointer to HBA context object | ||
1672 | * @rpi: Remote port login id | ||
1673 | * @txxri: Pointer to transmit exchange id | ||
1674 | * @rxxri: Pointer to response exchabge id | ||
1675 | * | ||
1676 | * This function obtains the transmit and receive ids required to send | ||
1677 | * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp | ||
1678 | * flags are used to the unsolicted response handler is able to process | ||
1679 | * the ct command sent on the same port. | ||
1680 | **/ | ||
1681 | static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, | ||
1682 | uint16_t *txxri, uint16_t * rxxri) | ||
1683 | { | ||
1684 | struct lpfc_bsg_event *evt; | ||
1685 | struct lpfc_iocbq *cmdiocbq, *rspiocbq; | ||
1686 | IOCB_t *cmd, *rsp; | ||
1687 | struct lpfc_dmabuf *dmabuf; | ||
1688 | struct ulp_bde64 *bpl = NULL; | ||
1689 | struct lpfc_sli_ct_request *ctreq = NULL; | ||
1690 | int ret_val = 0; | ||
1691 | unsigned long flags; | ||
1692 | |||
1693 | *txxri = 0; | ||
1694 | *rxxri = 0; | ||
1695 | evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, | ||
1696 | SLI_CT_ELX_LOOPBACK); | ||
1697 | if (!evt) | ||
1698 | return ENOMEM; | ||
1699 | |||
1700 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
1701 | list_add(&evt->node, &phba->ct_ev_waiters); | ||
1702 | lpfc_bsg_event_ref(evt); | ||
1703 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1704 | |||
1705 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
1706 | rspiocbq = lpfc_sli_get_iocbq(phba); | ||
1707 | |||
1708 | dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
1709 | if (dmabuf) { | ||
1710 | dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); | ||
1711 | INIT_LIST_HEAD(&dmabuf->list); | ||
1712 | bpl = (struct ulp_bde64 *) dmabuf->virt; | ||
1713 | memset(bpl, 0, sizeof(*bpl)); | ||
1714 | ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); | ||
1715 | bpl->addrHigh = | ||
1716 | le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl))); | ||
1717 | bpl->addrLow = | ||
1718 | le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl))); | ||
1719 | bpl->tus.f.bdeFlags = 0; | ||
1720 | bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; | ||
1721 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
1722 | } | ||
1723 | |||
1724 | if (cmdiocbq == NULL || rspiocbq == NULL || | ||
1725 | dmabuf == NULL || bpl == NULL || ctreq == NULL) { | ||
1726 | ret_val = ENOMEM; | ||
1727 | goto err_get_xri_exit; | ||
1728 | } | ||
1729 | |||
1730 | cmd = &cmdiocbq->iocb; | ||
1731 | rsp = &rspiocbq->iocb; | ||
1732 | |||
1733 | memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); | ||
1734 | |||
1735 | ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; | ||
1736 | ctreq->RevisionId.bits.InId = 0; | ||
1737 | ctreq->FsType = SLI_CT_ELX_LOOPBACK; | ||
1738 | ctreq->FsSubType = 0; | ||
1739 | ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; | ||
1740 | ctreq->CommandResponse.bits.Size = 0; | ||
1741 | |||
1742 | |||
1743 | cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys); | ||
1744 | cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys); | ||
1745 | cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
1746 | cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl); | ||
1747 | |||
1748 | cmd->un.xseq64.w5.hcsw.Fctl = LA; | ||
1749 | cmd->un.xseq64.w5.hcsw.Dfctl = 0; | ||
1750 | cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; | ||
1751 | cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; | ||
1752 | |||
1753 | cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; | ||
1754 | cmd->ulpBdeCount = 1; | ||
1755 | cmd->ulpLe = 1; | ||
1756 | cmd->ulpClass = CLASS3; | ||
1757 | cmd->ulpContext = rpi; | ||
1758 | |||
1759 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; | ||
1760 | cmdiocbq->vport = phba->pport; | ||
1761 | |||
1762 | ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, | ||
1763 | rspiocbq, | ||
1764 | (phba->fc_ratov * 2) | ||
1765 | + LPFC_DRVR_TIMEOUT); | ||
1766 | if (ret_val) | ||
1767 | goto err_get_xri_exit; | ||
1768 | |||
1769 | *txxri = rsp->ulpContext; | ||
1770 | |||
1771 | evt->waiting = 1; | ||
1772 | evt->wait_time_stamp = jiffies; | ||
1773 | ret_val = wait_event_interruptible_timeout( | ||
1774 | evt->wq, !list_empty(&evt->events_to_see), | ||
1775 | ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); | ||
1776 | if (list_empty(&evt->events_to_see)) | ||
1777 | ret_val = (ret_val) ? EINTR : ETIMEDOUT; | ||
1778 | else { | ||
1779 | ret_val = IOCB_SUCCESS; | ||
1780 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
1781 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
1782 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1783 | *rxxri = (list_entry(evt->events_to_get.prev, | ||
1784 | typeof(struct event_data), | ||
1785 | node))->immed_dat; | ||
1786 | } | ||
1787 | evt->waiting = 0; | ||
1788 | |||
1789 | err_get_xri_exit: | ||
1790 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
1791 | lpfc_bsg_event_unref(evt); /* release ref */ | ||
1792 | lpfc_bsg_event_unref(evt); /* delete */ | ||
1793 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
1794 | |||
1795 | if (dmabuf) { | ||
1796 | if (dmabuf->virt) | ||
1797 | lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); | ||
1798 | kfree(dmabuf); | ||
1799 | } | ||
1800 | |||
1801 | if (cmdiocbq && (ret_val != IOCB_TIMEDOUT)) | ||
1802 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
1803 | if (rspiocbq) | ||
1804 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
1805 | return ret_val; | ||
1806 | } | ||
1807 | |||
1808 | /** | ||
1809 | * diag_cmd_data_alloc - fills in a bde struct with dma buffers | ||
1810 | * @phba: Pointer to HBA context object | ||
1811 | * @bpl: Pointer to 64 bit bde structure | ||
1812 | * @size: Number of bytes to process | ||
1813 | * @nocopydata: Flag to copy user data into the allocated buffer | ||
1814 | * | ||
1815 | * This function allocates page size buffers and populates an lpfc_dmabufext. | ||
1816 | * If allowed the user data pointed to with indataptr is copied into the kernel | ||
1817 | * memory. The chained list of page size buffers is returned. | ||
1818 | **/ | ||
1819 | static struct lpfc_dmabufext * | ||
1820 | diag_cmd_data_alloc(struct lpfc_hba *phba, | ||
1821 | struct ulp_bde64 *bpl, uint32_t size, | ||
1822 | int nocopydata) | ||
1823 | { | ||
1824 | struct lpfc_dmabufext *mlist = NULL; | ||
1825 | struct lpfc_dmabufext *dmp; | ||
1826 | int cnt, offset = 0, i = 0; | ||
1827 | struct pci_dev *pcidev; | ||
1828 | |||
1829 | pcidev = phba->pcidev; | ||
1830 | |||
1831 | while (size) { | ||
1832 | /* We get chunks of 4K */ | ||
1833 | if (size > BUF_SZ_4K) | ||
1834 | cnt = BUF_SZ_4K; | ||
1835 | else | ||
1836 | cnt = size; | ||
1837 | |||
1838 | /* allocate struct lpfc_dmabufext buffer header */ | ||
1839 | dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); | ||
1840 | if (!dmp) | ||
1841 | goto out; | ||
1842 | |||
1843 | INIT_LIST_HEAD(&dmp->dma.list); | ||
1844 | |||
1845 | /* Queue it to a linked list */ | ||
1846 | if (mlist) | ||
1847 | list_add_tail(&dmp->dma.list, &mlist->dma.list); | ||
1848 | else | ||
1849 | mlist = dmp; | ||
1850 | |||
1851 | /* allocate buffer */ | ||
1852 | dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, | ||
1853 | cnt, | ||
1854 | &(dmp->dma.phys), | ||
1855 | GFP_KERNEL); | ||
1856 | |||
1857 | if (!dmp->dma.virt) | ||
1858 | goto out; | ||
1859 | |||
1860 | dmp->size = cnt; | ||
1861 | |||
1862 | if (nocopydata) { | ||
1863 | bpl->tus.f.bdeFlags = 0; | ||
1864 | pci_dma_sync_single_for_device(phba->pcidev, | ||
1865 | dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); | ||
1866 | |||
1867 | } else { | ||
1868 | memset((uint8_t *)dmp->dma.virt, 0, cnt); | ||
1869 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | ||
1870 | } | ||
1871 | |||
1872 | /* build buffer ptr list for IOCB */ | ||
1873 | bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); | ||
1874 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); | ||
1875 | bpl->tus.f.bdeSize = (ushort) cnt; | ||
1876 | bpl->tus.w = le32_to_cpu(bpl->tus.w); | ||
1877 | bpl++; | ||
1878 | |||
1879 | i++; | ||
1880 | offset += cnt; | ||
1881 | size -= cnt; | ||
1882 | } | ||
1883 | |||
1884 | mlist->flag = i; | ||
1885 | return mlist; | ||
1886 | out: | ||
1887 | diag_cmd_data_free(phba, mlist); | ||
1888 | return NULL; | ||
1889 | } | ||
1890 | |||
1891 | /** | ||
1892 | * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd | ||
1893 | * @phba: Pointer to HBA context object | ||
1894 | * @rxxri: Receive exchange id | ||
1895 | * @len: Number of data bytes | ||
1896 | * | ||
1897 | * This function allocates and posts a data buffer of sufficient size to recieve | ||
1898 | * an unsolicted CT command. | ||
1899 | **/ | ||
1900 | static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, | ||
1901 | size_t len) | ||
1902 | { | ||
1903 | struct lpfc_sli *psli = &phba->sli; | ||
1904 | struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; | ||
1905 | struct lpfc_iocbq *cmdiocbq; | ||
1906 | IOCB_t *cmd = NULL; | ||
1907 | struct list_head head, *curr, *next; | ||
1908 | struct lpfc_dmabuf *rxbmp; | ||
1909 | struct lpfc_dmabuf *dmp; | ||
1910 | struct lpfc_dmabuf *mp[2] = {NULL, NULL}; | ||
1911 | struct ulp_bde64 *rxbpl = NULL; | ||
1912 | uint32_t num_bde; | ||
1913 | struct lpfc_dmabufext *rxbuffer = NULL; | ||
1914 | int ret_val = 0; | ||
1915 | int i = 0; | ||
1916 | |||
1917 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
1918 | rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
1919 | if (rxbmp != NULL) { | ||
1920 | rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); | ||
1921 | INIT_LIST_HEAD(&rxbmp->list); | ||
1922 | rxbpl = (struct ulp_bde64 *) rxbmp->virt; | ||
1923 | rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); | ||
1924 | } | ||
1925 | |||
1926 | if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { | ||
1927 | ret_val = ENOMEM; | ||
1928 | goto err_post_rxbufs_exit; | ||
1929 | } | ||
1930 | |||
1931 | /* Queue buffers for the receive exchange */ | ||
1932 | num_bde = (uint32_t)rxbuffer->flag; | ||
1933 | dmp = &rxbuffer->dma; | ||
1934 | |||
1935 | cmd = &cmdiocbq->iocb; | ||
1936 | i = 0; | ||
1937 | |||
1938 | INIT_LIST_HEAD(&head); | ||
1939 | list_add_tail(&head, &dmp->list); | ||
1940 | list_for_each_safe(curr, next, &head) { | ||
1941 | mp[i] = list_entry(curr, struct lpfc_dmabuf, list); | ||
1942 | list_del(curr); | ||
1943 | |||
1944 | if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { | ||
1945 | mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); | ||
1946 | cmd->un.quexri64cx.buff.bde.addrHigh = | ||
1947 | putPaddrHigh(mp[i]->phys); | ||
1948 | cmd->un.quexri64cx.buff.bde.addrLow = | ||
1949 | putPaddrLow(mp[i]->phys); | ||
1950 | cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = | ||
1951 | ((struct lpfc_dmabufext *)mp[i])->size; | ||
1952 | cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; | ||
1953 | cmd->ulpCommand = CMD_QUE_XRI64_CX; | ||
1954 | cmd->ulpPU = 0; | ||
1955 | cmd->ulpLe = 1; | ||
1956 | cmd->ulpBdeCount = 1; | ||
1957 | cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; | ||
1958 | |||
1959 | } else { | ||
1960 | cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); | ||
1961 | cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); | ||
1962 | cmd->un.cont64[i].tus.f.bdeSize = | ||
1963 | ((struct lpfc_dmabufext *)mp[i])->size; | ||
1964 | cmd->ulpBdeCount = ++i; | ||
1965 | |||
1966 | if ((--num_bde > 0) && (i < 2)) | ||
1967 | continue; | ||
1968 | |||
1969 | cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; | ||
1970 | cmd->ulpLe = 1; | ||
1971 | } | ||
1972 | |||
1973 | cmd->ulpClass = CLASS3; | ||
1974 | cmd->ulpContext = rxxri; | ||
1975 | |||
1976 | ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); | ||
1977 | |||
1978 | if (ret_val == IOCB_ERROR) { | ||
1979 | diag_cmd_data_free(phba, | ||
1980 | (struct lpfc_dmabufext *)mp[0]); | ||
1981 | if (mp[1]) | ||
1982 | diag_cmd_data_free(phba, | ||
1983 | (struct lpfc_dmabufext *)mp[1]); | ||
1984 | dmp = list_entry(next, struct lpfc_dmabuf, list); | ||
1985 | ret_val = EIO; | ||
1986 | goto err_post_rxbufs_exit; | ||
1987 | } | ||
1988 | |||
1989 | lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); | ||
1990 | if (mp[1]) { | ||
1991 | lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); | ||
1992 | mp[1] = NULL; | ||
1993 | } | ||
1994 | |||
1995 | /* The iocb was freed by lpfc_sli_issue_iocb */ | ||
1996 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
1997 | if (!cmdiocbq) { | ||
1998 | dmp = list_entry(next, struct lpfc_dmabuf, list); | ||
1999 | ret_val = EIO; | ||
2000 | goto err_post_rxbufs_exit; | ||
2001 | } | ||
2002 | |||
2003 | cmd = &cmdiocbq->iocb; | ||
2004 | i = 0; | ||
2005 | } | ||
2006 | list_del(&head); | ||
2007 | |||
2008 | err_post_rxbufs_exit: | ||
2009 | |||
2010 | if (rxbmp) { | ||
2011 | if (rxbmp->virt) | ||
2012 | lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); | ||
2013 | kfree(rxbmp); | ||
2014 | } | ||
2015 | |||
2016 | if (cmdiocbq) | ||
2017 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
2018 | return ret_val; | ||
2019 | } | ||
2020 | |||
2021 | /** | ||
2022 | * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself | ||
2023 | * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job | ||
2024 | * | ||
2025 | * This function receives a user data buffer to be transmitted and received on | ||
2026 | * the same port, the link must be up and in loopback mode prior | ||
2027 | * to being called. | ||
2028 | * 1. A kernel buffer is allocated to copy the user data into. | ||
2029 | * 2. The port registers with "itself". | ||
2030 | * 3. The transmit and receive exchange ids are obtained. | ||
2031 | * 4. The receive exchange id is posted. | ||
2032 | * 5. A new els loopback event is created. | ||
2033 | * 6. The command and response iocbs are allocated. | ||
2034 | * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback. | ||
2035 | * | ||
2036 | * This function is meant to be called n times while the port is in loopback | ||
2037 | * so it is the apps responsibility to issue a reset to take the port out | ||
2038 | * of loopback mode. | ||
2039 | **/ | ||
2040 | static int | ||
2041 | lpfc_bsg_diag_test(struct fc_bsg_job *job) | ||
2042 | { | ||
2043 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
2044 | struct lpfc_hba *phba = vport->phba; | ||
2045 | struct diag_mode_test *diag_mode; | ||
2046 | struct lpfc_bsg_event *evt; | ||
2047 | struct event_data *evdat; | ||
2048 | struct lpfc_sli *psli = &phba->sli; | ||
2049 | uint32_t size; | ||
2050 | uint32_t full_size; | ||
2051 | size_t segment_len = 0, segment_offset = 0, current_offset = 0; | ||
2052 | uint16_t rpi; | ||
2053 | struct lpfc_iocbq *cmdiocbq, *rspiocbq; | ||
2054 | IOCB_t *cmd, *rsp; | ||
2055 | struct lpfc_sli_ct_request *ctreq; | ||
2056 | struct lpfc_dmabuf *txbmp; | ||
2057 | struct ulp_bde64 *txbpl = NULL; | ||
2058 | struct lpfc_dmabufext *txbuffer = NULL; | ||
2059 | struct list_head head; | ||
2060 | struct lpfc_dmabuf *curr; | ||
2061 | uint16_t txxri, rxxri; | ||
2062 | uint32_t num_bde; | ||
2063 | uint8_t *ptr = NULL, *rx_databuf = NULL; | ||
2064 | int rc = 0; | ||
2065 | unsigned long flags; | ||
2066 | void *dataout = NULL; | ||
2067 | uint32_t total_mem; | ||
2068 | |||
2069 | /* in case no data is returned return just the return code */ | ||
2070 | job->reply->reply_payload_rcv_len = 0; | ||
2071 | |||
2072 | if (job->request_len < | ||
2073 | sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { | ||
2074 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2075 | "2739 Received DIAG TEST request below minimum " | ||
2076 | "size\n"); | ||
2077 | rc = -EINVAL; | ||
2078 | goto loopback_test_exit; | ||
2079 | } | ||
2080 | |||
2081 | if (job->request_payload.payload_len != | ||
2082 | job->reply_payload.payload_len) { | ||
2083 | rc = -EINVAL; | ||
2084 | goto loopback_test_exit; | ||
2085 | } | ||
2086 | |||
2087 | diag_mode = (struct diag_mode_test *) | ||
2088 | job->request->rqst_data.h_vendor.vendor_cmd; | ||
2089 | |||
2090 | if ((phba->link_state == LPFC_HBA_ERROR) || | ||
2091 | (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || | ||
2092 | (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { | ||
2093 | rc = -EACCES; | ||
2094 | goto loopback_test_exit; | ||
2095 | } | ||
2096 | |||
2097 | if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { | ||
2098 | rc = -EACCES; | ||
2099 | goto loopback_test_exit; | ||
2100 | } | ||
2101 | |||
2102 | size = job->request_payload.payload_len; | ||
2103 | full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ | ||
2104 | |||
2105 | if ((size == 0) || (size > 80 * BUF_SZ_4K)) { | ||
2106 | rc = -ERANGE; | ||
2107 | goto loopback_test_exit; | ||
2108 | } | ||
2109 | |||
2110 | if (size >= BUF_SZ_4K) { | ||
2111 | /* | ||
2112 | * Allocate memory for ioctl data. If buffer is bigger than 64k, | ||
2113 | * then we allocate 64k and re-use that buffer over and over to | ||
2114 | * xfer the whole block. This is because Linux kernel has a | ||
2115 | * problem allocating more than 120k of kernel space memory. Saw | ||
2116 | * problem with GET_FCPTARGETMAPPING... | ||
2117 | */ | ||
2118 | if (size <= (64 * 1024)) | ||
2119 | total_mem = size; | ||
2120 | else | ||
2121 | total_mem = 64 * 1024; | ||
2122 | } else | ||
2123 | /* Allocate memory for ioctl data */ | ||
2124 | total_mem = BUF_SZ_4K; | ||
2125 | |||
2126 | dataout = kmalloc(total_mem, GFP_KERNEL); | ||
2127 | if (dataout == NULL) { | ||
2128 | rc = -ENOMEM; | ||
2129 | goto loopback_test_exit; | ||
2130 | } | ||
2131 | |||
2132 | ptr = dataout; | ||
2133 | ptr += ELX_LOOPBACK_HEADER_SZ; | ||
2134 | sg_copy_to_buffer(job->request_payload.sg_list, | ||
2135 | job->request_payload.sg_cnt, | ||
2136 | ptr, size); | ||
2137 | |||
2138 | rc = lpfcdiag_loop_self_reg(phba, &rpi); | ||
2139 | if (rc) { | ||
2140 | rc = -ENOMEM; | ||
2141 | goto loopback_test_exit; | ||
2142 | } | ||
2143 | |||
2144 | rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); | ||
2145 | if (rc) { | ||
2146 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2147 | rc = -ENOMEM; | ||
2148 | goto loopback_test_exit; | ||
2149 | } | ||
2150 | |||
2151 | rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); | ||
2152 | if (rc) { | ||
2153 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2154 | rc = -ENOMEM; | ||
2155 | goto loopback_test_exit; | ||
2156 | } | ||
2157 | |||
2158 | evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, | ||
2159 | SLI_CT_ELX_LOOPBACK); | ||
2160 | if (!evt) { | ||
2161 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2162 | rc = -ENOMEM; | ||
2163 | goto loopback_test_exit; | ||
2164 | } | ||
2165 | |||
2166 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2167 | list_add(&evt->node, &phba->ct_ev_waiters); | ||
2168 | lpfc_bsg_event_ref(evt); | ||
2169 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2170 | |||
2171 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
2172 | rspiocbq = lpfc_sli_get_iocbq(phba); | ||
2173 | txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
2174 | |||
2175 | if (txbmp) { | ||
2176 | txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); | ||
2177 | INIT_LIST_HEAD(&txbmp->list); | ||
2178 | txbpl = (struct ulp_bde64 *) txbmp->virt; | ||
2179 | if (txbpl) | ||
2180 | txbuffer = diag_cmd_data_alloc(phba, | ||
2181 | txbpl, full_size, 0); | ||
2182 | } | ||
2183 | |||
2184 | if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) { | ||
2185 | rc = -ENOMEM; | ||
2186 | goto err_loopback_test_exit; | ||
2187 | } | ||
2188 | |||
2189 | cmd = &cmdiocbq->iocb; | ||
2190 | rsp = &rspiocbq->iocb; | ||
2191 | |||
2192 | INIT_LIST_HEAD(&head); | ||
2193 | list_add_tail(&head, &txbuffer->dma.list); | ||
2194 | list_for_each_entry(curr, &head, list) { | ||
2195 | segment_len = ((struct lpfc_dmabufext *)curr)->size; | ||
2196 | if (current_offset == 0) { | ||
2197 | ctreq = curr->virt; | ||
2198 | memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); | ||
2199 | ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; | ||
2200 | ctreq->RevisionId.bits.InId = 0; | ||
2201 | ctreq->FsType = SLI_CT_ELX_LOOPBACK; | ||
2202 | ctreq->FsSubType = 0; | ||
2203 | ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; | ||
2204 | ctreq->CommandResponse.bits.Size = size; | ||
2205 | segment_offset = ELX_LOOPBACK_HEADER_SZ; | ||
2206 | } else | ||
2207 | segment_offset = 0; | ||
2208 | |||
2209 | BUG_ON(segment_offset >= segment_len); | ||
2210 | memcpy(curr->virt + segment_offset, | ||
2211 | ptr + current_offset, | ||
2212 | segment_len - segment_offset); | ||
2213 | |||
2214 | current_offset += segment_len - segment_offset; | ||
2215 | BUG_ON(current_offset > size); | ||
2216 | } | ||
2217 | list_del(&head); | ||
2218 | |||
2219 | /* Build the XMIT_SEQUENCE iocb */ | ||
2220 | |||
2221 | num_bde = (uint32_t)txbuffer->flag; | ||
2222 | |||
2223 | cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); | ||
2224 | cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); | ||
2225 | cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
2226 | cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64)); | ||
2227 | |||
2228 | cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); | ||
2229 | cmd->un.xseq64.w5.hcsw.Dfctl = 0; | ||
2230 | cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; | ||
2231 | cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; | ||
2232 | |||
2233 | cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; | ||
2234 | cmd->ulpBdeCount = 1; | ||
2235 | cmd->ulpLe = 1; | ||
2236 | cmd->ulpClass = CLASS3; | ||
2237 | cmd->ulpContext = txxri; | ||
2238 | |||
2239 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; | ||
2240 | cmdiocbq->vport = phba->pport; | ||
2241 | |||
2242 | rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, | ||
2243 | (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); | ||
2244 | |||
2245 | if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { | ||
2246 | rc = -EIO; | ||
2247 | goto err_loopback_test_exit; | ||
2248 | } | ||
2249 | |||
2250 | evt->waiting = 1; | ||
2251 | rc = wait_event_interruptible_timeout( | ||
2252 | evt->wq, !list_empty(&evt->events_to_see), | ||
2253 | ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); | ||
2254 | evt->waiting = 0; | ||
2255 | if (list_empty(&evt->events_to_see)) | ||
2256 | rc = (rc) ? -EINTR : -ETIMEDOUT; | ||
2257 | else { | ||
2258 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2259 | list_move(evt->events_to_see.prev, &evt->events_to_get); | ||
2260 | evdat = list_entry(evt->events_to_get.prev, | ||
2261 | typeof(*evdat), node); | ||
2262 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2263 | rx_databuf = evdat->data; | ||
2264 | if (evdat->len != full_size) { | ||
2265 | lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, | ||
2266 | "1603 Loopback test did not receive expected " | ||
2267 | "data length. actual length 0x%x expected " | ||
2268 | "length 0x%x\n", | ||
2269 | evdat->len, full_size); | ||
2270 | rc = -EIO; | ||
2271 | } else if (rx_databuf == NULL) | ||
2272 | rc = -EIO; | ||
2273 | else { | ||
2274 | rc = IOCB_SUCCESS; | ||
2275 | /* skip over elx loopback header */ | ||
2276 | rx_databuf += ELX_LOOPBACK_HEADER_SZ; | ||
2277 | job->reply->reply_payload_rcv_len = | ||
2278 | sg_copy_from_buffer(job->reply_payload.sg_list, | ||
2279 | job->reply_payload.sg_cnt, | ||
2280 | rx_databuf, size); | ||
2281 | job->reply->reply_payload_rcv_len = size; | ||
2282 | } | ||
2283 | } | ||
2284 | |||
2285 | err_loopback_test_exit: | ||
2286 | lpfcdiag_loop_self_unreg(phba, rpi); | ||
2287 | |||
2288 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2289 | lpfc_bsg_event_unref(evt); /* release ref */ | ||
2290 | lpfc_bsg_event_unref(evt); /* delete */ | ||
2291 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2292 | |||
2293 | if (cmdiocbq != NULL) | ||
2294 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
2295 | |||
2296 | if (rspiocbq != NULL) | ||
2297 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
2298 | |||
2299 | if (txbmp != NULL) { | ||
2300 | if (txbpl != NULL) { | ||
2301 | if (txbuffer != NULL) | ||
2302 | diag_cmd_data_free(phba, txbuffer); | ||
2303 | lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); | ||
2304 | } | ||
2305 | kfree(txbmp); | ||
2306 | } | ||
2307 | |||
2308 | loopback_test_exit: | ||
2309 | kfree(dataout); | ||
2310 | /* make error code available to userspace */ | ||
2311 | job->reply->result = rc; | ||
2312 | job->dd_data = NULL; | ||
2313 | /* complete the job back to userspace if no error */ | ||
2314 | if (rc == 0) | ||
2315 | job->job_done(job); | ||
2316 | return rc; | ||
2317 | } | ||
2318 | |||
2319 | /** | ||
2320 | * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command | ||
2321 | * @job: GET_DFC_REV fc_bsg_job | ||
2322 | **/ | ||
2323 | static int | ||
2324 | lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) | ||
2325 | { | ||
2326 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
2327 | struct lpfc_hba *phba = vport->phba; | ||
2328 | struct get_mgmt_rev *event_req; | ||
2329 | struct get_mgmt_rev_reply *event_reply; | ||
2330 | int rc = 0; | ||
2331 | |||
2332 | if (job->request_len < | ||
2333 | sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { | ||
2334 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2335 | "2740 Received GET_DFC_REV request below " | ||
2336 | "minimum size\n"); | ||
2337 | rc = -EINVAL; | ||
2338 | goto job_error; | ||
2339 | } | ||
2340 | |||
2341 | event_req = (struct get_mgmt_rev *) | ||
2342 | job->request->rqst_data.h_vendor.vendor_cmd; | ||
2343 | |||
2344 | event_reply = (struct get_mgmt_rev_reply *) | ||
2345 | job->reply->reply_data.vendor_reply.vendor_rsp; | ||
2346 | |||
2347 | if (job->reply_len < | ||
2348 | sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { | ||
2349 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2350 | "2741 Received GET_DFC_REV reply below " | ||
2351 | "minimum size\n"); | ||
2352 | rc = -EINVAL; | ||
2353 | goto job_error; | ||
2354 | } | ||
2355 | |||
2356 | event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; | ||
2357 | event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; | ||
2358 | job_error: | ||
2359 | job->reply->result = rc; | ||
2360 | if (rc == 0) | ||
2361 | job->job_done(job); | ||
2362 | return rc; | ||
2363 | } | ||
2364 | |||
/**
 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_bsg_issue_mbox function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context1
 * of the mailbox.
 *
 * Copies the completed mailbox back to the userspace reply buffer,
 * completes the bsg job, then releases the tracking structures.
 **/
void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	MAILBOX_t *pmb;		/* mailbox as completed by the firmware */
	MAILBOX_t *mb;		/* page-sized host copy returned to userspace */
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = pmboxq->context1;
	/* A NULL context1 means the job was already torn down (e.g. by the
	 * timeout handler); there is nothing left to complete.
	 */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
	mb = dd_data->context_un.mbox.mb;
	job = dd_data->context_un.mbox.set_job;
	memcpy(mb, pmb, sizeof(*pmb));
	/* The reply copy length is taken from the *request* payload size;
	 * lpfc_bsg_mbox_cmd enforces PAGE_SIZE requests, matching the
	 * PAGE_SIZE allocation of mb -- NOTE(review): confirm the reply
	 * scatterlist is always at least this large.
	 */
	size = job->request_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				job->reply_payload.sg_cnt,
				mb, size);
	job->reply->result = 0;
	/* Detach job from the tracking data so no other path replies twice */
	dd_data->context_un.mbox.set_job = NULL;
	job->dd_data = NULL;
	/* NOTE(review): job_done is invoked with ct_ev_lock held and IRQs
	 * disabled -- confirm the fc transport completion is safe here.
	 */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	kfree(mb);
	kfree(dd_data);
	return;
}
2412 | |||
/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 *
 * Returns 0 if the command may be issued, -EPERM otherwise.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
		/* fallthrough - offline-only commands are permitted by the
		 * always-allowed list below once the offline check passed
		 */
	/* Allowed on-line or off-line */
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			/* Menlo maintenance mode requested */
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			/* Menlo reset: drop loopback, back to pt-to-pt */
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			phba->fc_topology = TOPOLOGY_PT_PT;
		}
		break;
	/* Rejected: these need driver-managed buffers/state and must not
	 * be driven directly from the application.
	 */
	case MBX_RUN_BIU_DIAG64:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_SPARM64:
	case MBX_READ_LA:
	case MBX_READ_LA64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}
2503 | |||
2504 | /** | ||
2505 | * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app | ||
2506 | * @phba: Pointer to HBA context object. | ||
2507 | * @mb: Pointer to a mailbox object. | ||
2508 | * @vport: Pointer to a vport object. | ||
2509 | * | ||
2510 | * Allocate a tracking object, mailbox command memory, get a mailbox | ||
2511 | * from the mailbox pool, copy the caller mailbox command. | ||
2512 | * | ||
2513 | * If offline and the sli is active we need to poll for the command (port is | ||
2514 | * being reset) and com-plete the job, otherwise issue the mailbox command and | ||
2515 | * let our completion handler finish the command. | ||
2516 | **/ | ||
2517 | static uint32_t | ||
2518 | lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, | ||
2519 | struct lpfc_vport *vport) | ||
2520 | { | ||
2521 | LPFC_MBOXQ_t *pmboxq; | ||
2522 | MAILBOX_t *pmb; | ||
2523 | MAILBOX_t *mb; | ||
2524 | struct bsg_job_data *dd_data; | ||
2525 | uint32_t size; | ||
2526 | int rc = 0; | ||
2527 | |||
2528 | /* allocate our bsg tracking structure */ | ||
2529 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
2530 | if (!dd_data) { | ||
2531 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2532 | "2727 Failed allocation of dd_data\n"); | ||
2533 | return -ENOMEM; | ||
2534 | } | ||
2535 | |||
2536 | mb = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
2537 | if (!mb) { | ||
2538 | kfree(dd_data); | ||
2539 | return -ENOMEM; | ||
2540 | } | ||
2541 | |||
2542 | pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2543 | if (!pmboxq) { | ||
2544 | kfree(dd_data); | ||
2545 | kfree(mb); | ||
2546 | return -ENOMEM; | ||
2547 | } | ||
2548 | |||
2549 | size = job->request_payload.payload_len; | ||
2550 | job->reply->reply_payload_rcv_len = | ||
2551 | sg_copy_to_buffer(job->request_payload.sg_list, | ||
2552 | job->request_payload.sg_cnt, | ||
2553 | mb, size); | ||
2554 | |||
2555 | rc = lpfc_bsg_check_cmd_access(phba, mb, vport); | ||
2556 | if (rc != 0) { | ||
2557 | kfree(dd_data); | ||
2558 | kfree(mb); | ||
2559 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2560 | return rc; /* must be negative */ | ||
2561 | } | ||
2562 | |||
2563 | memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
2564 | pmb = &pmboxq->u.mb; | ||
2565 | memcpy(pmb, mb, sizeof(*pmb)); | ||
2566 | pmb->mbxOwner = OWN_HOST; | ||
2567 | pmboxq->context1 = NULL; | ||
2568 | pmboxq->vport = vport; | ||
2569 | |||
2570 | if ((vport->fc_flag & FC_OFFLINE_MODE) || | ||
2571 | (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { | ||
2572 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); | ||
2573 | if (rc != MBX_SUCCESS) { | ||
2574 | if (rc != MBX_TIMEOUT) { | ||
2575 | kfree(dd_data); | ||
2576 | kfree(mb); | ||
2577 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2578 | } | ||
2579 | return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; | ||
2580 | } | ||
2581 | |||
2582 | memcpy(mb, pmb, sizeof(*pmb)); | ||
2583 | job->reply->reply_payload_rcv_len = | ||
2584 | sg_copy_from_buffer(job->reply_payload.sg_list, | ||
2585 | job->reply_payload.sg_cnt, | ||
2586 | mb, size); | ||
2587 | kfree(dd_data); | ||
2588 | kfree(mb); | ||
2589 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2590 | /* not waiting mbox already done */ | ||
2591 | return 0; | ||
2592 | } | ||
2593 | |||
2594 | /* setup wake call as IOCB callback */ | ||
2595 | pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; | ||
2596 | /* setup context field to pass wait_queue pointer to wake function */ | ||
2597 | pmboxq->context1 = dd_data; | ||
2598 | dd_data->type = TYPE_MBOX; | ||
2599 | dd_data->context_un.mbox.pmboxq = pmboxq; | ||
2600 | dd_data->context_un.mbox.mb = mb; | ||
2601 | dd_data->context_un.mbox.set_job = job; | ||
2602 | job->dd_data = dd_data; | ||
2603 | rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); | ||
2604 | if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { | ||
2605 | kfree(dd_data); | ||
2606 | kfree(mb); | ||
2607 | mempool_free(pmboxq, phba->mbox_mem_pool); | ||
2608 | return -EIO; | ||
2609 | } | ||
2610 | |||
2611 | return 1; | ||
2612 | } | ||
2613 | |||
2614 | /** | ||
2615 | * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command | ||
2616 | * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. | ||
2617 | **/ | ||
2618 | static int | ||
2619 | lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) | ||
2620 | { | ||
2621 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
2622 | struct lpfc_hba *phba = vport->phba; | ||
2623 | int rc = 0; | ||
2624 | |||
2625 | /* in case no data is transferred */ | ||
2626 | job->reply->reply_payload_rcv_len = 0; | ||
2627 | if (job->request_len < | ||
2628 | sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { | ||
2629 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2630 | "2737 Received MBOX_REQ request below " | ||
2631 | "minimum size\n"); | ||
2632 | rc = -EINVAL; | ||
2633 | goto job_error; | ||
2634 | } | ||
2635 | |||
2636 | if (job->request_payload.payload_len != PAGE_SIZE) { | ||
2637 | rc = -EINVAL; | ||
2638 | goto job_error; | ||
2639 | } | ||
2640 | |||
2641 | if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { | ||
2642 | rc = -EAGAIN; | ||
2643 | goto job_error; | ||
2644 | } | ||
2645 | |||
2646 | rc = lpfc_bsg_issue_mbox(phba, job, vport); | ||
2647 | |||
2648 | job_error: | ||
2649 | if (rc == 0) { | ||
2650 | /* job done */ | ||
2651 | job->reply->result = 0; | ||
2652 | job->dd_data = NULL; | ||
2653 | job->job_done(job); | ||
2654 | } else if (rc == 1) | ||
2655 | /* job submitted, will complete later*/ | ||
2656 | rc = 0; /* return zero, no error */ | ||
2657 | else { | ||
2658 | /* some error occurred */ | ||
2659 | job->reply->result = rc; | ||
2660 | job->dd_data = NULL; | ||
2661 | } | ||
2662 | |||
2663 | return rc; | ||
2664 | } | ||
2665 | |||
2666 | /** | ||
2667 | * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler | ||
2668 | * @phba: Pointer to HBA context object. | ||
2669 | * @cmdiocbq: Pointer to command iocb. | ||
2670 | * @rspiocbq: Pointer to response iocb. | ||
2671 | * | ||
2672 | * This function is the completion handler for iocbs issued using | ||
2673 | * lpfc_menlo_cmd function. This function is called by the | ||
2674 | * ring event handler function without any lock held. This function | ||
2675 | * can be called from both worker thread context and interrupt | ||
2676 | * context. This function also can be called from another thread which | ||
2677 | * cleans up the SLI layer objects. | ||
2678 | * This function copies the contents of the response iocb to the | ||
2679 | * response iocb memory object provided by the caller of | ||
2680 | * lpfc_sli_issue_iocb_wait and then wakes up the thread which | ||
2681 | * sleeps for the iocb completion. | ||
2682 | **/ | ||
2683 | static void | ||
2684 | lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, | ||
2685 | struct lpfc_iocbq *cmdiocbq, | ||
2686 | struct lpfc_iocbq *rspiocbq) | ||
2687 | { | ||
2688 | struct bsg_job_data *dd_data; | ||
2689 | struct fc_bsg_job *job; | ||
2690 | IOCB_t *rsp; | ||
2691 | struct lpfc_dmabuf *bmp; | ||
2692 | struct lpfc_bsg_menlo *menlo; | ||
2693 | unsigned long flags; | ||
2694 | struct menlo_response *menlo_resp; | ||
2695 | int rc = 0; | ||
2696 | |||
2697 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | ||
2698 | dd_data = cmdiocbq->context1; | ||
2699 | if (!dd_data) { | ||
2700 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2701 | return; | ||
2702 | } | ||
2703 | |||
2704 | menlo = &dd_data->context_un.menlo; | ||
2705 | job = menlo->set_job; | ||
2706 | job->dd_data = NULL; /* so timeout handler does not reply */ | ||
2707 | |||
2708 | spin_lock_irqsave(&phba->hbalock, flags); | ||
2709 | cmdiocbq->iocb_flag |= LPFC_IO_WAKE; | ||
2710 | if (cmdiocbq->context2 && rspiocbq) | ||
2711 | memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, | ||
2712 | &rspiocbq->iocb, sizeof(IOCB_t)); | ||
2713 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
2714 | |||
2715 | bmp = menlo->bmp; | ||
2716 | rspiocbq = menlo->rspiocbq; | ||
2717 | rsp = &rspiocbq->iocb; | ||
2718 | |||
2719 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
2720 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
2721 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | ||
2722 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
2723 | |||
2724 | /* always return the xri, this would be used in the case | ||
2725 | * of a menlo download to allow the data to be sent as a continuation | ||
2726 | * of the exchange. | ||
2727 | */ | ||
2728 | menlo_resp = (struct menlo_response *) | ||
2729 | job->reply->reply_data.vendor_reply.vendor_rsp; | ||
2730 | menlo_resp->xri = rsp->ulpContext; | ||
2731 | if (rsp->ulpStatus) { | ||
2732 | if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { | ||
2733 | switch (rsp->un.ulpWord[4] & 0xff) { | ||
2734 | case IOERR_SEQUENCE_TIMEOUT: | ||
2735 | rc = -ETIMEDOUT; | ||
2736 | break; | ||
2737 | case IOERR_INVALID_RPI: | ||
2738 | rc = -EFAULT; | ||
2739 | break; | ||
2740 | default: | ||
2741 | rc = -EACCES; | ||
2742 | break; | ||
2743 | } | ||
2744 | } else | ||
2745 | rc = -EACCES; | ||
2746 | } else | ||
2747 | job->reply->reply_payload_rcv_len = | ||
2748 | rsp->un.genreq64.bdl.bdeSize; | ||
2749 | |||
2750 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | ||
2751 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
2752 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
2753 | kfree(bmp); | ||
2754 | kfree(dd_data); | ||
2755 | /* make error code available to userspace */ | ||
2756 | job->reply->result = rc; | ||
2757 | /* complete the job back to userspace */ | ||
2758 | job->job_done(job); | ||
2759 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
2760 | return; | ||
2761 | } | ||
2762 | |||
2763 | /** | ||
2764 | * lpfc_menlo_cmd - send an ioctl for menlo hardware | ||
2765 | * @job: fc_bsg_job to handle | ||
2766 | * | ||
2767 | * This function issues a gen request 64 CR ioctl for all menlo cmd requests, | ||
2768 | * all the command completions will return the xri for the command. | ||
2769 | * For menlo data requests a gen request 64 CX is used to continue the exchange | ||
2770 | * supplied in the menlo request header xri field. | ||
2771 | **/ | ||
2772 | static int | ||
2773 | lpfc_menlo_cmd(struct fc_bsg_job *job) | ||
2774 | { | ||
2775 | struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; | ||
2776 | struct lpfc_hba *phba = vport->phba; | ||
2777 | struct lpfc_iocbq *cmdiocbq, *rspiocbq; | ||
2778 | IOCB_t *cmd, *rsp; | ||
2779 | int rc = 0; | ||
2780 | struct menlo_command *menlo_cmd; | ||
2781 | struct menlo_response *menlo_resp; | ||
2782 | struct lpfc_dmabuf *bmp = NULL; | ||
2783 | int request_nseg; | ||
2784 | int reply_nseg; | ||
2785 | struct scatterlist *sgel = NULL; | ||
2786 | int numbde; | ||
2787 | dma_addr_t busaddr; | ||
2788 | struct bsg_job_data *dd_data; | ||
2789 | struct ulp_bde64 *bpl = NULL; | ||
2790 | |||
2791 | /* in case no data is returned return just the return code */ | ||
2792 | job->reply->reply_payload_rcv_len = 0; | ||
2793 | |||
2794 | if (job->request_len < | ||
2795 | sizeof(struct fc_bsg_request) + | ||
2796 | sizeof(struct menlo_command)) { | ||
2797 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2798 | "2784 Received MENLO_CMD request below " | ||
2799 | "minimum size\n"); | ||
2800 | rc = -ERANGE; | ||
2801 | goto no_dd_data; | ||
2802 | } | ||
2803 | |||
2804 | if (job->reply_len < | ||
2805 | sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { | ||
2806 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2807 | "2785 Received MENLO_CMD reply below " | ||
2808 | "minimum size\n"); | ||
2809 | rc = -ERANGE; | ||
2810 | goto no_dd_data; | ||
2811 | } | ||
2812 | |||
2813 | if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { | ||
2814 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2815 | "2786 Adapter does not support menlo " | ||
2816 | "commands\n"); | ||
2817 | rc = -EPERM; | ||
2818 | goto no_dd_data; | ||
2819 | } | ||
2820 | |||
2821 | menlo_cmd = (struct menlo_command *) | ||
2822 | job->request->rqst_data.h_vendor.vendor_cmd; | ||
2823 | |||
2824 | menlo_resp = (struct menlo_response *) | ||
2825 | job->reply->reply_data.vendor_reply.vendor_rsp; | ||
2826 | |||
2827 | /* allocate our bsg tracking structure */ | ||
2828 | dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); | ||
2829 | if (!dd_data) { | ||
2830 | lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, | ||
2831 | "2787 Failed allocation of dd_data\n"); | ||
2832 | rc = -ENOMEM; | ||
2833 | goto no_dd_data; | ||
2834 | } | ||
2835 | |||
2836 | bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
2837 | if (!bmp) { | ||
2838 | rc = -ENOMEM; | ||
2839 | goto free_dd; | ||
2840 | } | ||
2841 | |||
2842 | cmdiocbq = lpfc_sli_get_iocbq(phba); | ||
2843 | if (!cmdiocbq) { | ||
2844 | rc = -ENOMEM; | ||
2845 | goto free_bmp; | ||
2846 | } | ||
2847 | |||
2848 | rspiocbq = lpfc_sli_get_iocbq(phba); | ||
2849 | if (!rspiocbq) { | ||
2850 | rc = -ENOMEM; | ||
2851 | goto free_cmdiocbq; | ||
2852 | } | ||
2853 | |||
2854 | rsp = &rspiocbq->iocb; | ||
2855 | |||
2856 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); | ||
2857 | if (!bmp->virt) { | ||
2858 | rc = -ENOMEM; | ||
2859 | goto free_rspiocbq; | ||
2860 | } | ||
2861 | |||
2862 | INIT_LIST_HEAD(&bmp->list); | ||
2863 | bpl = (struct ulp_bde64 *) bmp->virt; | ||
2864 | request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, | ||
2865 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
2866 | for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { | ||
2867 | busaddr = sg_dma_address(sgel); | ||
2868 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | ||
2869 | bpl->tus.f.bdeSize = sg_dma_len(sgel); | ||
2870 | bpl->tus.w = cpu_to_le32(bpl->tus.w); | ||
2871 | bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); | ||
2872 | bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); | ||
2873 | bpl++; | ||
2874 | } | ||
2875 | |||
2876 | reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, | ||
2877 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
2878 | for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { | ||
2879 | busaddr = sg_dma_address(sgel); | ||
2880 | bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | ||
2881 | bpl->tus.f.bdeSize = sg_dma_len(sgel); | ||
2882 | bpl->tus.w = cpu_to_le32(bpl->tus.w); | ||
2883 | bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); | ||
2884 | bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); | ||
2885 | bpl++; | ||
2886 | } | ||
2887 | |||
2888 | cmd = &cmdiocbq->iocb; | ||
2889 | cmd->un.genreq64.bdl.ulpIoTag32 = 0; | ||
2890 | cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); | ||
2891 | cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); | ||
2892 | cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; | ||
2893 | cmd->un.genreq64.bdl.bdeSize = | ||
2894 | (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); | ||
2895 | cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); | ||
2896 | cmd->un.genreq64.w5.hcsw.Dfctl = 0; | ||
2897 | cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; | ||
2898 | cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ | ||
2899 | cmd->ulpBdeCount = 1; | ||
2900 | cmd->ulpClass = CLASS3; | ||
2901 | cmd->ulpOwner = OWN_CHIP; | ||
2902 | cmd->ulpLe = 1; /* Limited Edition */ | ||
2903 | cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; | ||
2904 | cmdiocbq->vport = phba->pport; | ||
2905 | /* We want the firmware to timeout before we do */ | ||
2906 | cmd->ulpTimeout = MENLO_TIMEOUT - 5; | ||
2907 | cmdiocbq->context3 = bmp; | ||
2908 | cmdiocbq->context2 = rspiocbq; | ||
2909 | cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; | ||
2910 | cmdiocbq->context1 = dd_data; | ||
2911 | cmdiocbq->context2 = rspiocbq; | ||
2912 | if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { | ||
2913 | cmd->ulpCommand = CMD_GEN_REQUEST64_CR; | ||
2914 | cmd->ulpPU = MENLO_PU; /* 3 */ | ||
2915 | cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ | ||
2916 | cmd->ulpContext = MENLO_CONTEXT; /* 0 */ | ||
2917 | } else { | ||
2918 | cmd->ulpCommand = CMD_GEN_REQUEST64_CX; | ||
2919 | cmd->ulpPU = 1; | ||
2920 | cmd->un.ulpWord[4] = 0; | ||
2921 | cmd->ulpContext = menlo_cmd->xri; | ||
2922 | } | ||
2923 | |||
2924 | dd_data->type = TYPE_MENLO; | ||
2925 | dd_data->context_un.menlo.cmdiocbq = cmdiocbq; | ||
2926 | dd_data->context_un.menlo.rspiocbq = rspiocbq; | ||
2927 | dd_data->context_un.menlo.set_job = job; | ||
2928 | dd_data->context_un.menlo.bmp = bmp; | ||
2929 | |||
2930 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, | ||
2931 | MENLO_TIMEOUT - 5); | ||
2932 | if (rc == IOCB_SUCCESS) | ||
2933 | return 0; /* done for now */ | ||
2934 | |||
2935 | /* iocb failed so cleanup */ | ||
2936 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | ||
2937 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | ||
2938 | pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, | ||
2939 | job->reply_payload.sg_cnt, DMA_FROM_DEVICE); | ||
2940 | |||
2941 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | ||
2942 | |||
2943 | free_rspiocbq: | ||
2944 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
2945 | free_cmdiocbq: | ||
2946 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
2947 | free_bmp: | ||
2948 | kfree(bmp); | ||
2949 | free_dd: | ||
2950 | kfree(dd_data); | ||
2951 | no_dd_data: | ||
2952 | /* make error code available to userspace */ | ||
2953 | job->reply->result = rc; | ||
2954 | job->dd_data = NULL; | ||
2955 | return rc; | ||
2956 | } | ||
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 *
 * Dispatches on the first vendor command word.  Each handler is
 * responsible for completing the job itself; only the default
 * (unknown command) path fills in the reply here.
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	/* both menlo variants share one entry point */
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
855 | 3003 | ||
856 | /** | 3004 | /** |
857 | * lpfc_bsg_request - handle a bsg request from the FC transport | 3005 | * lpfc_bsg_request - handle a bsg request from the FC transport |
858 | * @job: fc_bsg_job to handle | 3006 | * @job: fc_bsg_job to handle |
859 | */ | 3007 | **/ |
860 | int | 3008 | int |
861 | lpfc_bsg_request(struct fc_bsg_job *job) | 3009 | lpfc_bsg_request(struct fc_bsg_job *job) |
862 | { | 3010 | { |
863 | uint32_t msgcode; | 3011 | uint32_t msgcode; |
864 | int rc = -EINVAL; | 3012 | int rc; |
865 | 3013 | ||
866 | msgcode = job->request->msgcode; | 3014 | msgcode = job->request->msgcode; |
867 | |||
868 | switch (msgcode) { | 3015 | switch (msgcode) { |
869 | case FC_BSG_HST_VENDOR: | 3016 | case FC_BSG_HST_VENDOR: |
870 | rc = lpfc_bsg_hst_vendor(job); | 3017 | rc = lpfc_bsg_hst_vendor(job); |
@@ -873,9 +3020,13 @@ lpfc_bsg_request(struct fc_bsg_job *job) | |||
873 | rc = lpfc_bsg_rport_els(job); | 3020 | rc = lpfc_bsg_rport_els(job); |
874 | break; | 3021 | break; |
875 | case FC_BSG_RPT_CT: | 3022 | case FC_BSG_RPT_CT: |
876 | rc = lpfc_bsg_rport_ct(job); | 3023 | rc = lpfc_bsg_send_mgmt_cmd(job); |
877 | break; | 3024 | break; |
878 | default: | 3025 | default: |
3026 | rc = -EINVAL; | ||
3027 | job->reply->reply_payload_rcv_len = 0; | ||
3028 | /* make error code available to userspace */ | ||
3029 | job->reply->result = rc; | ||
879 | break; | 3030 | break; |
880 | } | 3031 | } |
881 | 3032 | ||
@@ -888,17 +3039,83 @@ lpfc_bsg_request(struct fc_bsg_job *job) | |||
888 | * | 3039 | * |
889 | * This function just aborts the job's IOCB. The aborted IOCB will return to | 3040 | * This function just aborts the job's IOCB. The aborted IOCB will return to |
890 | * the waiting function which will handle passing the error back to userspace | 3041 | * the waiting function which will handle passing the error back to userspace |
891 | */ | 3042 | **/ |
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	/* ct_ev_lock serializes against the request completion paths that
	 * also manipulate job->dd_data.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	/* dd_data->type tells which kind of bsg request is timing out; the
	 * matching context_un member carries that request's private state.
	 */
	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		/* drop ct_ev_lock before taking hbalock for the abort;
		 * the two locks are never held together here.
		 */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = &dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* complete the job here; the event side will see the
		 * cleared set_job and not touch it again.
		 */
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* -EAGAIN: tell the app to retry the mailbox command */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MENLO:
		/* Menlo requests are aborted the same way as plain iocbs:
		 * flag the timeout and abort the outstanding command.
		 */
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		/* unknown request type: nothing to abort, just drop the lock */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}