aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/lpfc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--drivers/scsi/lpfc/Makefile2
-rw-r--r--drivers/scsi/lpfc/lpfc.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c904
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c259
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h74
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c134
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c93
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_nl.h20
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c263
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c53
19 files changed, 1689 insertions, 223 deletions
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 1c286707dd5f..ad05d6edb8f6 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -28,4 +28,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
28 28
29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ 29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \ 30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
31 lpfc_vport.o lpfc_debugfs.o 31 lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1877d9811831..aa10f7951634 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -312,6 +312,7 @@ struct lpfc_vport {
312#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ 312#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
313#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */ 313#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
314#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */ 314#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
315#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
315 316
316 uint32_t ct_flags; 317 uint32_t ct_flags;
317#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ 318#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -440,6 +441,12 @@ enum intr_type_t {
440 MSIX, 441 MSIX,
441}; 442};
442 443
444struct unsol_rcv_ct_ctx {
445 uint32_t ctxt_id;
446 uint32_t SID;
447 uint32_t oxid;
448};
449
443struct lpfc_hba { 450struct lpfc_hba {
444 /* SCSI interface function jump table entries */ 451 /* SCSI interface function jump table entries */
445 int (*lpfc_new_scsi_buf) 452 int (*lpfc_new_scsi_buf)
@@ -525,6 +532,8 @@ struct lpfc_hba {
525#define FCP_XRI_ABORT_EVENT 0x20 532#define FCP_XRI_ABORT_EVENT 0x20
526#define ELS_XRI_ABORT_EVENT 0x40 533#define ELS_XRI_ABORT_EVENT 0x40
527#define ASYNC_EVENT 0x80 534#define ASYNC_EVENT 0x80
535#define LINK_DISABLED 0x100 /* Link disabled by user */
536#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
528 struct lpfc_dmabuf slim2p; 537 struct lpfc_dmabuf slim2p;
529 538
530 MAILBOX_t *mbox; 539 MAILBOX_t *mbox;
@@ -616,6 +625,8 @@ struct lpfc_hba {
616 uint32_t hbq_count; /* Count of configured HBQs */ 625 uint32_t hbq_count; /* Count of configured HBQs */
617 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 626 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
618 627
628 uint32_t fcp_qidx; /* next work queue to post work to */
629
619 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 630 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
620 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ 631 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
621 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 632 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
@@ -682,6 +693,7 @@ struct lpfc_hba {
682 struct pci_pool *lpfc_mbuf_pool; 693 struct pci_pool *lpfc_mbuf_pool;
683 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ 694 struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
684 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ 695 struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
696 struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
685 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 697 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
686 698
687 mempool_t *mbox_mem_pool; 699 mempool_t *mbox_mem_pool;
@@ -763,11 +775,18 @@ struct lpfc_hba {
763/* Maximum number of events that can be outstanding at any time*/ 775/* Maximum number of events that can be outstanding at any time*/
764#define LPFC_MAX_EVT_COUNT 512 776#define LPFC_MAX_EVT_COUNT 512
765 atomic_t fast_event_count; 777 atomic_t fast_event_count;
778 uint32_t fcoe_eventtag;
779 uint32_t fcoe_eventtag_at_fcf_scan;
766 struct lpfc_fcf fcf; 780 struct lpfc_fcf fcf;
767 uint8_t fc_map[3]; 781 uint8_t fc_map[3];
768 uint8_t valid_vlan; 782 uint8_t valid_vlan;
769 uint16_t vlan_id; 783 uint16_t vlan_id;
770 struct list_head fcf_conn_rec_list; 784 struct list_head fcf_conn_rec_list;
785
786 struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
787 struct list_head ct_ev_waiters;
788 struct unsol_rcv_ct_ctx ct_ctx[64];
789 uint32_t ctx_idx;
771}; 790};
772 791
773static inline struct Scsi_Host * 792static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index fc07be5fbce9..e1a30a16a9fa 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -394,7 +394,12 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
394 case LPFC_INIT_MBX_CMDS: 394 case LPFC_INIT_MBX_CMDS:
395 case LPFC_LINK_DOWN: 395 case LPFC_LINK_DOWN:
396 case LPFC_HBA_ERROR: 396 case LPFC_HBA_ERROR:
397 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); 397 if (phba->hba_flag & LINK_DISABLED)
398 len += snprintf(buf + len, PAGE_SIZE-len,
399 "Link Down - User disabled\n");
400 else
401 len += snprintf(buf + len, PAGE_SIZE-len,
402 "Link Down\n");
398 break; 403 break;
399 case LPFC_LINK_UP: 404 case LPFC_LINK_UP:
400 case LPFC_CLEAR_LA: 405 case LPFC_CLEAR_LA:
@@ -4127,6 +4132,9 @@ struct fc_function_template lpfc_transport_functions = {
4127 .vport_disable = lpfc_vport_disable, 4132 .vport_disable = lpfc_vport_disable,
4128 4133
4129 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name, 4134 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
4135
4136 .bsg_request = lpfc_bsg_request,
4137 .bsg_timeout = lpfc_bsg_timeout,
4130}; 4138};
4131 4139
4132struct fc_function_template lpfc_vport_transport_functions = { 4140struct fc_function_template lpfc_vport_transport_functions = {
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
new file mode 100644
index 000000000000..da6bf5aac9dd
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -0,0 +1,904 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#include <linux/interrupt.h>
22#include <linux/mempool.h>
23#include <linux/pci.h>
24
25#include <scsi/scsi.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport_fc.h>
28#include <scsi/scsi_bsg_fc.h>
29
30#include "lpfc_hw4.h"
31#include "lpfc_hw.h"
32#include "lpfc_sli.h"
33#include "lpfc_sli4.h"
34#include "lpfc_nl.h"
35#include "lpfc_disc.h"
36#include "lpfc_scsi.h"
37#include "lpfc.h"
38#include "lpfc_logmsg.h"
39#include "lpfc_crtn.h"
40#include "lpfc_vport.h"
41#include "lpfc_version.h"
42
43/**
44 * lpfc_bsg_rport_ct - send a CT command from a bsg request
45 * @job: fc_bsg_job to handle
46 */
47static int
48lpfc_bsg_rport_ct(struct fc_bsg_job *job)
49{
50 struct Scsi_Host *shost = job->shost;
51 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
52 struct lpfc_hba *phba = vport->phba;
53 struct lpfc_rport_data *rdata = job->rport->dd_data;
54 struct lpfc_nodelist *ndlp = rdata->pnode;
55 struct ulp_bde64 *bpl = NULL;
56 uint32_t timeout;
57 struct lpfc_iocbq *cmdiocbq = NULL;
58 struct lpfc_iocbq *rspiocbq = NULL;
59 IOCB_t *cmd;
60 IOCB_t *rsp;
61 struct lpfc_dmabuf *bmp = NULL;
62 int request_nseg;
63 int reply_nseg;
64 struct scatterlist *sgel = NULL;
65 int numbde;
66 dma_addr_t busaddr;
67 int rc = 0;
68
69 /* in case no data is transferred */
70 job->reply->reply_payload_rcv_len = 0;
71
72 if (!lpfc_nlp_get(ndlp)) {
73 job->reply->result = -ENODEV;
74 return 0;
75 }
76
77 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
78 rc = -ENODEV;
79 goto free_ndlp_exit;
80 }
81
82 spin_lock_irq(shost->host_lock);
83 cmdiocbq = lpfc_sli_get_iocbq(phba);
84 if (!cmdiocbq) {
85 rc = -ENOMEM;
86 spin_unlock_irq(shost->host_lock);
87 goto free_ndlp_exit;
88 }
89 cmd = &cmdiocbq->iocb;
90
91 rspiocbq = lpfc_sli_get_iocbq(phba);
92 if (!rspiocbq) {
93 rc = -ENOMEM;
94 goto free_cmdiocbq;
95 }
96 spin_unlock_irq(shost->host_lock);
97
98 rsp = &rspiocbq->iocb;
99
100 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
101 if (!bmp) {
102 rc = -ENOMEM;
103 spin_lock_irq(shost->host_lock);
104 goto free_rspiocbq;
105 }
106
107 spin_lock_irq(shost->host_lock);
108 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
109 if (!bmp->virt) {
110 rc = -ENOMEM;
111 goto free_bmp;
112 }
113 spin_unlock_irq(shost->host_lock);
114
115 INIT_LIST_HEAD(&bmp->list);
116 bpl = (struct ulp_bde64 *) bmp->virt;
117
118 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
119 job->request_payload.sg_cnt, DMA_TO_DEVICE);
120 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
121 busaddr = sg_dma_address(sgel);
122 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
123 bpl->tus.f.bdeSize = sg_dma_len(sgel);
124 bpl->tus.w = cpu_to_le32(bpl->tus.w);
125 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
126 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
127 bpl++;
128 }
129
130 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
131 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
132 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
133 busaddr = sg_dma_address(sgel);
134 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
135 bpl->tus.f.bdeSize = sg_dma_len(sgel);
136 bpl->tus.w = cpu_to_le32(bpl->tus.w);
137 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
138 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
139 bpl++;
140 }
141
142 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
143 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
144 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
145 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
146 cmd->un.genreq64.bdl.bdeSize =
147 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
148 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
149 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
150 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
151 cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
152 cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
153 cmd->ulpBdeCount = 1;
154 cmd->ulpLe = 1;
155 cmd->ulpClass = CLASS3;
156 cmd->ulpContext = ndlp->nlp_rpi;
157 cmd->ulpOwner = OWN_CHIP;
158 cmdiocbq->vport = phba->pport;
159 cmdiocbq->context1 = NULL;
160 cmdiocbq->context2 = NULL;
161 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
162
163 timeout = phba->fc_ratov * 2;
164 job->dd_data = cmdiocbq;
165
166 rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
167 timeout + LPFC_DRVR_TIMEOUT);
168
169 if (rc != IOCB_TIMEDOUT) {
170 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
171 job->request_payload.sg_cnt, DMA_TO_DEVICE);
172 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
173 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
174 }
175
176 if (rc == IOCB_TIMEDOUT) {
177 lpfc_sli_release_iocbq(phba, rspiocbq);
178 rc = -EACCES;
179 goto free_ndlp_exit;
180 }
181
182 if (rc != IOCB_SUCCESS) {
183 rc = -EACCES;
184 goto free_outdmp;
185 }
186
187 if (rsp->ulpStatus) {
188 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
189 switch (rsp->un.ulpWord[4] & 0xff) {
190 case IOERR_SEQUENCE_TIMEOUT:
191 rc = -ETIMEDOUT;
192 break;
193 case IOERR_INVALID_RPI:
194 rc = -EFAULT;
195 break;
196 default:
197 rc = -EACCES;
198 break;
199 }
200 goto free_outdmp;
201 }
202 } else
203 job->reply->reply_payload_rcv_len =
204 rsp->un.genreq64.bdl.bdeSize;
205
206free_outdmp:
207 spin_lock_irq(shost->host_lock);
208 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
209free_bmp:
210 kfree(bmp);
211free_rspiocbq:
212 lpfc_sli_release_iocbq(phba, rspiocbq);
213free_cmdiocbq:
214 lpfc_sli_release_iocbq(phba, cmdiocbq);
215 spin_unlock_irq(shost->host_lock);
216free_ndlp_exit:
217 lpfc_nlp_put(ndlp);
218
219 /* make error code available to userspace */
220 job->reply->result = rc;
221 /* complete the job back to userspace */
222 job->job_done(job);
223
224 return 0;
225}
226
227/**
228 * lpfc_bsg_rport_els - send an ELS command from a bsg request
229 * @job: fc_bsg_job to handle
230 */
231static int
232lpfc_bsg_rport_els(struct fc_bsg_job *job)
233{
234 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
235 struct lpfc_hba *phba = vport->phba;
236 struct lpfc_rport_data *rdata = job->rport->dd_data;
237 struct lpfc_nodelist *ndlp = rdata->pnode;
238
239 uint32_t elscmd;
240 uint32_t cmdsize;
241 uint32_t rspsize;
242 struct lpfc_iocbq *rspiocbq;
243 struct lpfc_iocbq *cmdiocbq;
244 IOCB_t *rsp;
245 uint16_t rpi = 0;
246 struct lpfc_dmabuf *pcmd;
247 struct lpfc_dmabuf *prsp;
248 struct lpfc_dmabuf *pbuflist = NULL;
249 struct ulp_bde64 *bpl;
250 int iocb_status;
251 int request_nseg;
252 int reply_nseg;
253 struct scatterlist *sgel = NULL;
254 int numbde;
255 dma_addr_t busaddr;
256 int rc = 0;
257
258 /* in case no data is transferred */
259 job->reply->reply_payload_rcv_len = 0;
260
261 if (!lpfc_nlp_get(ndlp)) {
262 rc = -ENODEV;
263 goto out;
264 }
265
266 elscmd = job->request->rqst_data.r_els.els_code;
267 cmdsize = job->request_payload.payload_len;
268 rspsize = job->reply_payload.payload_len;
269 rspiocbq = lpfc_sli_get_iocbq(phba);
270 if (!rspiocbq) {
271 lpfc_nlp_put(ndlp);
272 rc = -ENOMEM;
273 goto out;
274 }
275
276 rsp = &rspiocbq->iocb;
277 rpi = ndlp->nlp_rpi;
278
279 cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
280 ndlp->nlp_DID, elscmd);
281
282 if (!cmdiocbq) {
283 lpfc_sli_release_iocbq(phba, rspiocbq);
284 return -EIO;
285 }
286
287 job->dd_data = cmdiocbq;
288 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
289 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
290
291 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
292 kfree(pcmd);
293 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
294 kfree(prsp);
295 cmdiocbq->context2 = NULL;
296
297 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
298 bpl = (struct ulp_bde64 *) pbuflist->virt;
299
300 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
301 job->request_payload.sg_cnt, DMA_TO_DEVICE);
302
303 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
304 busaddr = sg_dma_address(sgel);
305 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
306 bpl->tus.f.bdeSize = sg_dma_len(sgel);
307 bpl->tus.w = cpu_to_le32(bpl->tus.w);
308 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
309 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
310 bpl++;
311 }
312
313 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
314 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
315 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
316 busaddr = sg_dma_address(sgel);
317 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
318 bpl->tus.f.bdeSize = sg_dma_len(sgel);
319 bpl->tus.w = cpu_to_le32(bpl->tus.w);
320 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
321 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
322 bpl++;
323 }
324
325 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
326 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327 cmdiocbq->iocb.ulpContext = rpi;
328 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
329 cmdiocbq->context1 = NULL;
330 cmdiocbq->context2 = NULL;
331
332 iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
333 rspiocbq, (phba->fc_ratov * 2)
334 + LPFC_DRVR_TIMEOUT);
335
336 /* release the new ndlp once the iocb completes */
337 lpfc_nlp_put(ndlp);
338 if (iocb_status != IOCB_TIMEDOUT) {
339 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
340 job->request_payload.sg_cnt, DMA_TO_DEVICE);
341 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
342 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
343 }
344
345 if (iocb_status == IOCB_SUCCESS) {
346 if (rsp->ulpStatus == IOSTAT_SUCCESS) {
347 job->reply->reply_payload_rcv_len =
348 rsp->un.elsreq64.bdl.bdeSize;
349 rc = 0;
350 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
351 struct fc_bsg_ctels_reply *els_reply;
352 /* LS_RJT data returned in word 4 */
353 uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
354
355 els_reply = &job->reply->reply_data.ctels_reply;
356 job->reply->result = 0;
357 els_reply->status = FC_CTELS_STATUS_REJECT;
358 els_reply->rjt_data.action = rjt_data[0];
359 els_reply->rjt_data.reason_code = rjt_data[1];
360 els_reply->rjt_data.reason_explanation = rjt_data[2];
361 els_reply->rjt_data.vendor_unique = rjt_data[3];
362 } else
363 rc = -EIO;
364 } else
365 rc = -EIO;
366
367 if (iocb_status != IOCB_TIMEDOUT)
368 lpfc_els_free_iocb(phba, cmdiocbq);
369
370 lpfc_sli_release_iocbq(phba, rspiocbq);
371
372out:
373 /* make error code available to userspace */
374 job->reply->result = rc;
375 /* complete the job back to userspace */
376 job->job_done(job);
377
378 return 0;
379}
380
381struct lpfc_ct_event {
382 struct list_head node;
383 int ref;
384 wait_queue_head_t wq;
385
386 /* Event type and waiter identifiers */
387 uint32_t type_mask;
388 uint32_t req_id;
389 uint32_t reg_id;
390
391 /* next two flags are here for the auto-delete logic */
392 unsigned long wait_time_stamp;
393 int waiting;
394
395 /* seen and not seen events */
396 struct list_head events_to_get;
397 struct list_head events_to_see;
398};
399
400struct event_data {
401 struct list_head node;
402 uint32_t type;
403 uint32_t immed_dat;
404 void *data;
405 uint32_t len;
406};
407
408static struct lpfc_ct_event *
409lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
410{
411 struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
412 if (!evt)
413 return NULL;
414
415 INIT_LIST_HEAD(&evt->events_to_get);
416 INIT_LIST_HEAD(&evt->events_to_see);
417 evt->req_id = ev_req_id;
418 evt->reg_id = ev_reg_id;
419 evt->wait_time_stamp = jiffies;
420 init_waitqueue_head(&evt->wq);
421
422 return evt;
423}
424
425static void
426lpfc_ct_event_free(struct lpfc_ct_event *evt)
427{
428 struct event_data *ed;
429
430 list_del(&evt->node);
431
432 while (!list_empty(&evt->events_to_get)) {
433 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
434 list_del(&ed->node);
435 kfree(ed->data);
436 kfree(ed);
437 }
438
439 while (!list_empty(&evt->events_to_see)) {
440 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
441 list_del(&ed->node);
442 kfree(ed->data);
443 kfree(ed);
444 }
445
446 kfree(evt);
447}
448
449static inline void
450lpfc_ct_event_ref(struct lpfc_ct_event *evt)
451{
452 evt->ref++;
453}
454
455static inline void
456lpfc_ct_event_unref(struct lpfc_ct_event *evt)
457{
458 if (--evt->ref < 0)
459 lpfc_ct_event_free(evt);
460}
461
462#define SLI_CT_ELX_LOOPBACK 0x10
463
464enum ELX_LOOPBACK_CMD {
465 ELX_LOOPBACK_XRI_SETUP,
466 ELX_LOOPBACK_DATA,
467};
468
469/**
470 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
471 * @phba:
472 * @pring:
473 * @piocbq:
474 *
475 * This function is called when an unsolicited CT command is received. It
476 * forwards the event to any processes registerd to receive CT events.
477 */
478void
479lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
480 struct lpfc_iocbq *piocbq)
481{
482 uint32_t evt_req_id = 0;
483 uint32_t cmd;
484 uint32_t len;
485 struct lpfc_dmabuf *dmabuf = NULL;
486 struct lpfc_ct_event *evt;
487 struct event_data *evt_dat = NULL;
488 struct lpfc_iocbq *iocbq;
489 size_t offset = 0;
490 struct list_head head;
491 struct ulp_bde64 *bde;
492 dma_addr_t dma_addr;
493 int i;
494 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
495 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
496 struct lpfc_hbq_entry *hbqe;
497 struct lpfc_sli_ct_request *ct_req;
498
499 INIT_LIST_HEAD(&head);
500 list_add_tail(&head, &piocbq->list);
501
502 if (piocbq->iocb.ulpBdeCount == 0 ||
503 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
504 goto error_ct_unsol_exit;
505
506 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
507 dmabuf = bdeBuf1;
508 else {
509 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
510 piocbq->iocb.un.cont64[0].addrLow);
511 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
512 }
513
514 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
515 evt_req_id = ct_req->FsType;
516 cmd = ct_req->CommandResponse.bits.CmdRsp;
517 len = ct_req->CommandResponse.bits.Size;
518 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
519 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
520
521 mutex_lock(&phba->ct_event_mutex);
522 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
523 if (evt->req_id != evt_req_id)
524 continue;
525
526 lpfc_ct_event_ref(evt);
527
528 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
529 if (!evt_dat) {
530 lpfc_ct_event_unref(evt);
531 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
532 "2614 Memory allocation failed for "
533 "CT event\n");
534 break;
535 }
536
537 mutex_unlock(&phba->ct_event_mutex);
538
539 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
540 /* take accumulated byte count from the last iocbq */
541 iocbq = list_entry(head.prev, typeof(*iocbq), list);
542 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
543 } else {
544 list_for_each_entry(iocbq, &head, list) {
545 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
546 evt_dat->len +=
547 iocbq->iocb.un.cont64[i].tus.f.bdeSize;
548 }
549 }
550
551 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
552 if (!evt_dat->data) {
553 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
554 "2615 Memory allocation failed for "
555 "CT event data, size %d\n",
556 evt_dat->len);
557 kfree(evt_dat);
558 mutex_lock(&phba->ct_event_mutex);
559 lpfc_ct_event_unref(evt);
560 mutex_unlock(&phba->ct_event_mutex);
561 goto error_ct_unsol_exit;
562 }
563
564 list_for_each_entry(iocbq, &head, list) {
565 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
566 bdeBuf1 = iocbq->context2;
567 bdeBuf2 = iocbq->context3;
568 }
569 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
570 int size = 0;
571 if (phba->sli3_options &
572 LPFC_SLI3_HBQ_ENABLED) {
573 if (i == 0) {
574 hbqe = (struct lpfc_hbq_entry *)
575 &iocbq->iocb.un.ulpWord[0];
576 size = hbqe->bde.tus.f.bdeSize;
577 dmabuf = bdeBuf1;
578 } else if (i == 1) {
579 hbqe = (struct lpfc_hbq_entry *)
580 &iocbq->iocb.unsli3.
581 sli3Words[4];
582 size = hbqe->bde.tus.f.bdeSize;
583 dmabuf = bdeBuf2;
584 }
585 if ((offset + size) > evt_dat->len)
586 size = evt_dat->len - offset;
587 } else {
588 size = iocbq->iocb.un.cont64[i].
589 tus.f.bdeSize;
590 bde = &iocbq->iocb.un.cont64[i];
591 dma_addr = getPaddr(bde->addrHigh,
592 bde->addrLow);
593 dmabuf = lpfc_sli_ringpostbuf_get(phba,
594 pring, dma_addr);
595 }
596 if (!dmabuf) {
597 lpfc_printf_log(phba, KERN_ERR,
598 LOG_LIBDFC, "2616 No dmabuf "
599 "found for iocbq 0x%p\n",
600 iocbq);
601 kfree(evt_dat->data);
602 kfree(evt_dat);
603 mutex_lock(&phba->ct_event_mutex);
604 lpfc_ct_event_unref(evt);
605 mutex_unlock(&phba->ct_event_mutex);
606 goto error_ct_unsol_exit;
607 }
608 memcpy((char *)(evt_dat->data) + offset,
609 dmabuf->virt, size);
610 offset += size;
611 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
612 !(phba->sli3_options &
613 LPFC_SLI3_HBQ_ENABLED)) {
614 lpfc_sli_ringpostbuf_put(phba, pring,
615 dmabuf);
616 } else {
617 switch (cmd) {
618 case ELX_LOOPBACK_XRI_SETUP:
619 if (!(phba->sli3_options &
620 LPFC_SLI3_HBQ_ENABLED))
621 lpfc_post_buffer(phba,
622 pring,
623 1);
624 else
625 lpfc_in_buf_free(phba,
626 dmabuf);
627 break;
628 default:
629 if (!(phba->sli3_options &
630 LPFC_SLI3_HBQ_ENABLED))
631 lpfc_post_buffer(phba,
632 pring,
633 1);
634 break;
635 }
636 }
637 }
638 }
639
640 mutex_lock(&phba->ct_event_mutex);
641 if (phba->sli_rev == LPFC_SLI_REV4) {
642 evt_dat->immed_dat = phba->ctx_idx;
643 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
644 phba->ct_ctx[evt_dat->immed_dat].oxid =
645 piocbq->iocb.ulpContext;
646 phba->ct_ctx[evt_dat->immed_dat].SID =
647 piocbq->iocb.un.rcvels.remoteID;
648 } else
649 evt_dat->immed_dat = piocbq->iocb.ulpContext;
650
651 evt_dat->type = FC_REG_CT_EVENT;
652 list_add(&evt_dat->node, &evt->events_to_see);
653 wake_up_interruptible(&evt->wq);
654 lpfc_ct_event_unref(evt);
655 if (evt_req_id == SLI_CT_ELX_LOOPBACK)
656 break;
657 }
658 mutex_unlock(&phba->ct_event_mutex);
659
660error_ct_unsol_exit:
661 if (!list_empty(&head))
662 list_del(&head);
663
664 return;
665}
666
667/**
668 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
669 * @job: SET_EVENT fc_bsg_job
670 */
671static int
672lpfc_bsg_set_event(struct fc_bsg_job *job)
673{
674 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
675 struct lpfc_hba *phba = vport->phba;
676 struct set_ct_event *event_req;
677 struct lpfc_ct_event *evt;
678 int rc = 0;
679
680 if (job->request_len <
681 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
682 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
683 "2612 Received SET_CT_EVENT below minimum "
684 "size\n");
685 return -EINVAL;
686 }
687
688 event_req = (struct set_ct_event *)
689 job->request->rqst_data.h_vendor.vendor_cmd;
690
691 mutex_lock(&phba->ct_event_mutex);
692 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
693 if (evt->reg_id == event_req->ev_reg_id) {
694 lpfc_ct_event_ref(evt);
695 evt->wait_time_stamp = jiffies;
696 break;
697 }
698 }
699 mutex_unlock(&phba->ct_event_mutex);
700
701 if (&evt->node == &phba->ct_ev_waiters) {
702 /* no event waiting struct yet - first call */
703 evt = lpfc_ct_event_new(event_req->ev_reg_id,
704 event_req->ev_req_id);
705 if (!evt) {
706 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
707 "2617 Failed allocation of event "
708 "waiter\n");
709 return -ENOMEM;
710 }
711
712 mutex_lock(&phba->ct_event_mutex);
713 list_add(&evt->node, &phba->ct_ev_waiters);
714 lpfc_ct_event_ref(evt);
715 mutex_unlock(&phba->ct_event_mutex);
716 }
717
718 evt->waiting = 1;
719 if (wait_event_interruptible(evt->wq,
720 !list_empty(&evt->events_to_see))) {
721 mutex_lock(&phba->ct_event_mutex);
722 lpfc_ct_event_unref(evt); /* release ref */
723 lpfc_ct_event_unref(evt); /* delete */
724 mutex_unlock(&phba->ct_event_mutex);
725 rc = -EINTR;
726 goto set_event_out;
727 }
728
729 evt->wait_time_stamp = jiffies;
730 evt->waiting = 0;
731
732 mutex_lock(&phba->ct_event_mutex);
733 list_move(evt->events_to_see.prev, &evt->events_to_get);
734 lpfc_ct_event_unref(evt); /* release ref */
735 mutex_unlock(&phba->ct_event_mutex);
736
737set_event_out:
738 /* set_event carries no reply payload */
739 job->reply->reply_payload_rcv_len = 0;
740 /* make error code available to userspace */
741 job->reply->result = rc;
742 /* complete the job back to userspace */
743 job->job_done(job);
744
745 return 0;
746}
747
748/**
749 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
750 * @job: GET_EVENT fc_bsg_job
751 */
752static int
753lpfc_bsg_get_event(struct fc_bsg_job *job)
754{
755 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
756 struct lpfc_hba *phba = vport->phba;
757 struct get_ct_event *event_req;
758 struct get_ct_event_reply *event_reply;
759 struct lpfc_ct_event *evt;
760 struct event_data *evt_dat = NULL;
761 int rc = 0;
762
763 if (job->request_len <
764 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
765 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
766 "2613 Received GET_CT_EVENT request below "
767 "minimum size\n");
768 return -EINVAL;
769 }
770
771 event_req = (struct get_ct_event *)
772 job->request->rqst_data.h_vendor.vendor_cmd;
773
774 event_reply = (struct get_ct_event_reply *)
775 job->reply->reply_data.vendor_reply.vendor_rsp;
776
777 mutex_lock(&phba->ct_event_mutex);
778 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
779 if (evt->reg_id == event_req->ev_reg_id) {
780 if (list_empty(&evt->events_to_get))
781 break;
782 lpfc_ct_event_ref(evt);
783 evt->wait_time_stamp = jiffies;
784 evt_dat = list_entry(evt->events_to_get.prev,
785 struct event_data, node);
786 list_del(&evt_dat->node);
787 break;
788 }
789 }
790 mutex_unlock(&phba->ct_event_mutex);
791
792 if (!evt_dat) {
793 job->reply->reply_payload_rcv_len = 0;
794 rc = -ENOENT;
795 goto error_get_event_exit;
796 }
797
798 if (evt_dat->len > job->reply_payload.payload_len) {
799 evt_dat->len = job->reply_payload.payload_len;
800 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
801 "2618 Truncated event data at %d "
802 "bytes\n",
803 job->reply_payload.payload_len);
804 }
805
806 event_reply->immed_data = evt_dat->immed_dat;
807
808 if (evt_dat->len > 0)
809 job->reply->reply_payload_rcv_len =
810 sg_copy_from_buffer(job->reply_payload.sg_list,
811 job->reply_payload.sg_cnt,
812 evt_dat->data, evt_dat->len);
813 else
814 job->reply->reply_payload_rcv_len = 0;
815 rc = 0;
816
817 if (evt_dat)
818 kfree(evt_dat->data);
819 kfree(evt_dat);
820 mutex_lock(&phba->ct_event_mutex);
821 lpfc_ct_event_unref(evt);
822 mutex_unlock(&phba->ct_event_mutex);
823
824error_get_event_exit:
825 /* make error code available to userspace */
826 job->reply->result = rc;
827 /* complete the job back to userspace */
828 job->job_done(job);
829
830 return rc;
831}
832
833/**
834 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
835 * @job: fc_bsg_job to handle
836 */
837static int
838lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
839{
840 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
841
842 switch (command) {
843 case LPFC_BSG_VENDOR_SET_CT_EVENT:
844 return lpfc_bsg_set_event(job);
845 break;
846
847 case LPFC_BSG_VENDOR_GET_CT_EVENT:
848 return lpfc_bsg_get_event(job);
849 break;
850
851 default:
852 return -EINVAL;
853 }
854}
855
856/**
857 * lpfc_bsg_request - handle a bsg request from the FC transport
858 * @job: fc_bsg_job to handle
859 */
860int
861lpfc_bsg_request(struct fc_bsg_job *job)
862{
863 uint32_t msgcode;
864 int rc = -EINVAL;
865
866 msgcode = job->request->msgcode;
867
868 switch (msgcode) {
869 case FC_BSG_HST_VENDOR:
870 rc = lpfc_bsg_hst_vendor(job);
871 break;
872 case FC_BSG_RPT_ELS:
873 rc = lpfc_bsg_rport_els(job);
874 break;
875 case FC_BSG_RPT_CT:
876 rc = lpfc_bsg_rport_ct(job);
877 break;
878 default:
879 break;
880 }
881
882 return rc;
883}
884
885/**
886 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
887 * @job: fc_bsg_job that has timed out
888 *
889 * This function just aborts the job's IOCB. The aborted IOCB will return to
890 * the waiting function which will handle passing the error back to userspace
891 */
892int
893lpfc_bsg_timeout(struct fc_bsg_job *job)
894{
895 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
896 struct lpfc_hba *phba = vport->phba;
897 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
898 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
899
900 if (cmdiocb)
901 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
902
903 return 0;
904}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index d2a922997c0f..0830f37409a3 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -21,9 +21,11 @@
21typedef int (*node_filter)(struct lpfc_nodelist *, void *); 21typedef int (*node_filter)(struct lpfc_nodelist *, void *);
22 22
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
25void lpfc_sli_read_link_ste(struct lpfc_hba *);
26void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
25void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); 27void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 28int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
27int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); 29int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
28void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 30void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
29void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 31void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -135,6 +137,9 @@ int lpfc_els_disc_adisc(struct lpfc_vport *);
135int lpfc_els_disc_plogi(struct lpfc_vport *); 137int lpfc_els_disc_plogi(struct lpfc_vport *);
136void lpfc_els_timeout(unsigned long); 138void lpfc_els_timeout(unsigned long);
137void lpfc_els_timeout_handler(struct lpfc_vport *); 139void lpfc_els_timeout_handler(struct lpfc_vport *);
140struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
141 uint8_t, struct lpfc_nodelist *,
142 uint32_t, uint32_t);
138void lpfc_hb_timeout_handler(struct lpfc_hba *); 143void lpfc_hb_timeout_handler(struct lpfc_hba *);
139 144
140void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 145void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -182,11 +187,12 @@ int lpfc_mbox_dev_check(struct lpfc_hba *);
182int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 187int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
183void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); 188void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
184void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); 189void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
185void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); 190void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
186void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); 191void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
187void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); 192void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
188void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); 193void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
189void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); 194void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
195int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
190 196
191void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, 197void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
192 uint32_t , LPFC_MBOXQ_t *); 198 uint32_t , LPFC_MBOXQ_t *);
@@ -234,6 +240,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
234int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, 240int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
235 struct lpfc_iocbq *, uint32_t); 241 struct lpfc_iocbq *, uint32_t);
236void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 242void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
243void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
237void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 244void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
238void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); 245void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
239int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 246int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -360,3 +367,8 @@ void lpfc_start_fdiscs(struct lpfc_hba *phba);
360#define HBA_EVENT_LINK_UP 2 367#define HBA_EVENT_LINK_UP 2
361#define HBA_EVENT_LINK_DOWN 3 368#define HBA_EVENT_LINK_DOWN 3
362 369
370/* functions to support SGIOv4/bsg interface */
371int lpfc_bsg_request(struct fc_bsg_job *);
372int lpfc_bsg_timeout(struct fc_bsg_job *);
373void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
374 struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0e532f072eb3..9df7ed38e1be 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -97,6 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
97 struct list_head head; 97 struct list_head head;
98 struct lpfc_dmabuf *bdeBuf; 98 struct lpfc_dmabuf *bdeBuf;
99 99
100 lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
101
100 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { 102 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
101 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 103 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
102 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && 104 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f72fdf23bf1b..45337cd23feb 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -146,7 +146,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
146 * Pointer to the newly allocated/prepared els iocb data structure 146 * Pointer to the newly allocated/prepared els iocb data structure
147 * NULL - when els iocb data structure allocation/preparation failed 147 * NULL - when els iocb data structure allocation/preparation failed
148 **/ 148 **/
149static struct lpfc_iocbq * 149struct lpfc_iocbq *
150lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, 150lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
151 uint16_t cmdSize, uint8_t retry, 151 uint16_t cmdSize, uint8_t retry,
152 struct lpfc_nodelist *ndlp, uint32_t did, 152 struct lpfc_nodelist *ndlp, uint32_t did,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ed46b24a3380..e6a47e25b218 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -61,6 +61,7 @@ static uint8_t lpfcAlpaArray[] = {
61 61
62static void lpfc_disc_timeout_handler(struct lpfc_vport *); 62static void lpfc_disc_timeout_handler(struct lpfc_vport *);
63static void lpfc_disc_flush_list(struct lpfc_vport *vport); 63static void lpfc_disc_flush_list(struct lpfc_vport *vport);
64static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
64 65
65void 66void
66lpfc_terminate_rport_io(struct fc_rport *rport) 67lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -1009,9 +1010,15 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1009 spin_lock_irqsave(&phba->hbalock, flags); 1010 spin_lock_irqsave(&phba->hbalock, flags);
1010 phba->fcf.fcf_flag |= FCF_REGISTERED; 1011 phba->fcf.fcf_flag |= FCF_REGISTERED;
1011 spin_unlock_irqrestore(&phba->hbalock, flags); 1012 spin_unlock_irqrestore(&phba->hbalock, flags);
1013 /* If there is a pending FCoE event, restart FCF table scan. */
1014 if (lpfc_check_pending_fcoe_event(phba, 1)) {
1015 mempool_free(mboxq, phba->mbox_mem_pool);
1016 return;
1017 }
1012 if (vport->port_state != LPFC_FLOGI) { 1018 if (vport->port_state != LPFC_FLOGI) {
1013 spin_lock_irqsave(&phba->hbalock, flags); 1019 spin_lock_irqsave(&phba->hbalock, flags);
1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1020 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1021 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1015 spin_unlock_irqrestore(&phba->hbalock, flags); 1022 spin_unlock_irqrestore(&phba->hbalock, flags);
1016 lpfc_initial_flogi(vport); 1023 lpfc_initial_flogi(vport);
1017 } 1024 }
@@ -1054,6 +1061,39 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1054} 1061}
1055 1062
1056/** 1063/**
1064 * lpfc_sw_name_match - Check if the fcf switch name match.
1065 * @sw_name: pointer to switch name.
1066 * @new_fcf_record: pointer to fcf record.
1067 *
1068 * This routine compares the fcf record's switch name with the provided
1069 * switch name. If the switch names are identical this function
1070 * returns 1, else it returns 0.
1071 **/
1072static uint32_t
1073lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1074{
1075 if ((sw_name[0] ==
1076 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
1077 (sw_name[1] ==
1078 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
1079 (sw_name[2] ==
1080 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
1081 (sw_name[3] ==
1082 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
1083 (sw_name[4] ==
1084 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
1085 (sw_name[5] ==
1086 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
1087 (sw_name[6] ==
1088 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
1089 (sw_name[7] ==
1090 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
1091 return 1;
1092 else
1093 return 0;
1094}
1095
1096/**
1057 * lpfc_mac_addr_match - Check if the fcf mac address match. 1097 * lpfc_mac_addr_match - Check if the fcf mac address match.
1058 * @phba: pointer to lpfc hba data structure. 1098 * @phba: pointer to lpfc hba data structure.
1059 * @new_fcf_record: pointer to fcf record. 1099 * @new_fcf_record: pointer to fcf record.
@@ -1123,6 +1163,22 @@ lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record); 1163 bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1164 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1125 phba->fcf.priority = new_fcf_record->fip_priority; 1165 phba->fcf.priority = new_fcf_record->fip_priority;
1166 phba->fcf.switch_name[0] =
1167 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1168 phba->fcf.switch_name[1] =
1169 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1170 phba->fcf.switch_name[2] =
1171 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1172 phba->fcf.switch_name[3] =
1173 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1174 phba->fcf.switch_name[4] =
1175 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1176 phba->fcf.switch_name[5] =
1177 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1178 phba->fcf.switch_name[6] =
1179 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1180 phba->fcf.switch_name[7] =
1181 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1126} 1182}
1127 1183
1128/** 1184/**
@@ -1150,6 +1206,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1150 /* The FCF is already registered, start discovery */ 1206 /* The FCF is already registered, start discovery */
1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1207 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1208 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
1209 phba->hba_flag &= ~FCF_DISC_INPROGRESS;
1153 spin_unlock_irqrestore(&phba->hbalock, flags); 1210 spin_unlock_irqrestore(&phba->hbalock, flags);
1154 if (phba->pport->port_state != LPFC_FLOGI) 1211 if (phba->pport->port_state != LPFC_FLOGI)
1155 lpfc_initial_flogi(phba->pport); 1212 lpfc_initial_flogi(phba->pport);
@@ -1239,9 +1296,12 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1239 1296
1240 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && 1297 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1241 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, 1298 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1242 new_fcf_record)) 1299 new_fcf_record))
1300 continue;
1301 if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1302 !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1303 new_fcf_record))
1243 continue; 1304 continue;
1244
1245 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { 1305 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1246 /* 1306 /*
1247 * If the vlan bit map does not have the bit set for the 1307 * If the vlan bit map does not have the bit set for the
@@ -1336,6 +1396,60 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1336} 1396}
1337 1397
1338/** 1398/**
1399 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
1400 * @phba: pointer to lpfc hba data structure.
1401 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
1402 *
1403 * This function checks if there is any fcoe event pending while the
1404 * driver scans FCF entries. If there is any pending event, it will
1405 * restart the FCF scanning and return 1, else it returns 0.
1406 */
1407int
1408lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1409{
1410 LPFC_MBOXQ_t *mbox;
1411 int rc;
1412 /*
1413 * If the Link is up and no FCoE events while in the
1414 * FCF discovery, no need to restart FCF discovery.
1415 */
1416 if ((phba->link_state >= LPFC_LINK_UP) &&
1417 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1418 return 0;
1419
1420 spin_lock_irq(&phba->hbalock);
1421 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1422 spin_unlock_irq(&phba->hbalock);
1423
1424 if (phba->link_state >= LPFC_LINK_UP)
1425 lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
1426
1427 if (unreg_fcf) {
1428 spin_lock_irq(&phba->hbalock);
1429 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1430 spin_unlock_irq(&phba->hbalock);
1431 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1432 if (!mbox) {
1433 lpfc_printf_log(phba, KERN_ERR,
1434 LOG_DISCOVERY|LOG_MBOX,
1435 "2610 UNREG_FCFI mbox allocation failed\n");
1436 return 1;
1437 }
1438 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
1439 mbox->vport = phba->pport;
1440 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
1441 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1442 if (rc == MBX_NOT_FINISHED) {
1443 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
1444 "2611 UNREG_FCFI issue mbox failed\n");
1445 mempool_free(mbox, phba->mbox_mem_pool);
1446 }
1447 }
1448
1449 return 1;
1450}
1451
1452/**
1339 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1453 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
1340 * @phba: pointer to lpfc hba data structure. 1454 * @phba: pointer to lpfc hba data structure.
1341 * @mboxq: pointer to mailbox object. 1455 * @mboxq: pointer to mailbox object.
@@ -1367,6 +1481,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1367 unsigned long flags; 1481 unsigned long flags;
1368 uint16_t vlan_id; 1482 uint16_t vlan_id;
1369 1483
1484 /* If there is pending FCoE event restart FCF table scan */
1485 if (lpfc_check_pending_fcoe_event(phba, 0)) {
1486 lpfc_sli4_mbox_cmd_free(phba, mboxq);
1487 return;
1488 }
1489
1370 /* Get the first SGE entry from the non-embedded DMA memory. This 1490 /* Get the first SGE entry from the non-embedded DMA memory. This
1371 * routine only uses a single SGE. 1491 * routine only uses a single SGE.
1372 */ 1492 */
@@ -1424,7 +1544,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1424 spin_lock_irqsave(&phba->hbalock, flags); 1544 spin_lock_irqsave(&phba->hbalock, flags);
1425 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1545 if (phba->fcf.fcf_flag & FCF_IN_USE) {
1426 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1546 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1427 new_fcf_record) && 1547 new_fcf_record) &&
1548 lpfc_sw_name_match(phba->fcf.switch_name,
1549 new_fcf_record) &&
1428 lpfc_mac_addr_match(phba, new_fcf_record)) { 1550 lpfc_mac_addr_match(phba, new_fcf_record)) {
1429 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1551 phba->fcf.fcf_flag |= FCF_AVAILABLE;
1430 spin_unlock_irqrestore(&phba->hbalock, flags); 1552 spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1464,9 +1586,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1464 * If there is a record with lower priority value for 1586 * If there is a record with lower priority value for
1465 * the current FCF, use that record. 1587 * the current FCF, use that record.
1466 */ 1588 */
1467 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record) 1589 if (lpfc_fab_name_match(phba->fcf.fabric_name,
1468 && (new_fcf_record->fip_priority < 1590 new_fcf_record) &&
1469 phba->fcf.priority)) { 1591 (new_fcf_record->fip_priority < phba->fcf.priority)) {
1470 /* Use this FCF record */ 1592 /* Use this FCF record */
1471 lpfc_copy_fcf_record(phba, new_fcf_record); 1593 lpfc_copy_fcf_record(phba, new_fcf_record);
1472 phba->fcf.addr_mode = addr_mode; 1594 phba->fcf.addr_mode = addr_mode;
@@ -1512,6 +1634,39 @@ out:
1512} 1634}
1513 1635
1514/** 1636/**
1637 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
1638 * @phba: pointer to lpfc hba data structure.
1639 * @mboxq: pointer to mailbox data structure.
1640 *
1641 * This function handles completion of init vpi mailbox command.
1642 */
1643static void
1644lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1645{
1646 struct lpfc_vport *vport = mboxq->vport;
1647 if (mboxq->u.mb.mbxStatus) {
1648 lpfc_printf_vlog(vport, KERN_ERR,
1649 LOG_MBOX,
1650 "2609 Init VPI mailbox failed 0x%x\n",
1651 mboxq->u.mb.mbxStatus);
1652 mempool_free(mboxq, phba->mbox_mem_pool);
1653 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1654 return;
1655 }
1656 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
1657
1658 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1659 lpfc_initial_fdisc(vport);
1660 else {
1661 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
1662 lpfc_printf_vlog(vport, KERN_ERR,
1663 LOG_ELS,
1664 "2606 No NPIV Fabric support\n");
1665 }
1666 return;
1667}
1668
1669/**
1515 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 1670 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
1516 * @phba: pointer to lpfc hba data structure. 1671 * @phba: pointer to lpfc hba data structure.
1517 * 1672 *
@@ -1523,6 +1678,8 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1523{ 1678{
1524 struct lpfc_vport **vports; 1679 struct lpfc_vport **vports;
1525 int i; 1680 int i;
1681 LPFC_MBOXQ_t *mboxq;
1682 int rc;
1526 1683
1527 vports = lpfc_create_vport_work_array(phba); 1684 vports = lpfc_create_vport_work_array(phba);
1528 if (vports != NULL) { 1685 if (vports != NULL) {
@@ -1540,6 +1697,29 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
1540 FC_VPORT_LINKDOWN); 1697 FC_VPORT_LINKDOWN);
1541 continue; 1698 continue;
1542 } 1699 }
1700 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
1701 mboxq = mempool_alloc(phba->mbox_mem_pool,
1702 GFP_KERNEL);
1703 if (!mboxq) {
1704 lpfc_printf_vlog(vports[i], KERN_ERR,
1705 LOG_MBOX, "2607 Failed to allocate "
1706 "init_vpi mailbox\n");
1707 continue;
1708 }
1709 lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
1710 mboxq->vport = vports[i];
1711 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
1712 rc = lpfc_sli_issue_mbox(phba, mboxq,
1713 MBX_NOWAIT);
1714 if (rc == MBX_NOT_FINISHED) {
1715 lpfc_printf_vlog(vports[i], KERN_ERR,
1716 LOG_MBOX, "2608 Failed to issue "
1717 "init_vpi mailbox\n");
1718 mempool_free(mboxq,
1719 phba->mbox_mem_pool);
1720 }
1721 continue;
1722 }
1543 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 1723 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1544 lpfc_initial_fdisc(vports[i]); 1724 lpfc_initial_fdisc(vports[i]);
1545 else { 1725 else {
@@ -1769,6 +1949,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1769 goto out; 1949 goto out;
1770 } 1950 }
1771 } else { 1951 } else {
1952 vport->port_state = LPFC_VPORT_UNKNOWN;
1772 /* 1953 /*
1773 * Add the driver's default FCF record at FCF index 0 now. This 1954 * Add the driver's default FCF record at FCF index 0 now. This
1774 * is phase 1 implementation that support FCF index 0 and driver 1955 * is phase 1 implementation that support FCF index 0 and driver
@@ -1804,6 +1985,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
1804 * The driver is expected to do FIP/FCF. Call the port 1985 * The driver is expected to do FIP/FCF. Call the port
1805 * and get the FCF Table. 1986 * and get the FCF Table.
1806 */ 1987 */
1988 spin_lock_irq(&phba->hbalock);
1989 if (phba->hba_flag & FCF_DISC_INPROGRESS) {
1990 spin_unlock_irq(&phba->hbalock);
1991 return;
1992 }
1993 spin_unlock_irq(&phba->hbalock);
1807 rc = lpfc_sli4_read_fcf_record(phba, 1994 rc = lpfc_sli4_read_fcf_record(phba,
1808 LPFC_FCOE_FCF_GET_FIRST); 1995 LPFC_FCOE_FCF_GET_FIRST);
1809 if (rc) 1996 if (rc)
@@ -2113,13 +2300,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2113 LPFC_MBOXQ_t *pmb = NULL; 2300 LPFC_MBOXQ_t *pmb = NULL;
2114 MAILBOX_t *mb; 2301 MAILBOX_t *mb;
2115 struct static_vport_info *vport_info; 2302 struct static_vport_info *vport_info;
2116 int rc, i; 2303 int rc = 0, i;
2117 struct fc_vport_identifiers vport_id; 2304 struct fc_vport_identifiers vport_id;
2118 struct fc_vport *new_fc_vport; 2305 struct fc_vport *new_fc_vport;
2119 struct Scsi_Host *shost; 2306 struct Scsi_Host *shost;
2120 struct lpfc_vport *vport; 2307 struct lpfc_vport *vport;
2121 uint16_t offset = 0; 2308 uint16_t offset = 0;
2122 uint8_t *vport_buff; 2309 uint8_t *vport_buff;
2310 struct lpfc_dmabuf *mp;
2311 uint32_t byte_count = 0;
2123 2312
2124 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2313 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2125 if (!pmb) { 2314 if (!pmb) {
@@ -2142,7 +2331,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2142 2331
2143 vport_buff = (uint8_t *) vport_info; 2332 vport_buff = (uint8_t *) vport_info;
2144 do { 2333 do {
2145 lpfc_dump_static_vport(phba, pmb, offset); 2334 if (lpfc_dump_static_vport(phba, pmb, offset))
2335 goto out;
2336
2146 pmb->vport = phba->pport; 2337 pmb->vport = phba->pport;
2147 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); 2338 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
2148 2339
@@ -2155,17 +2346,30 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2155 goto out; 2346 goto out;
2156 } 2347 }
2157 2348
2158 if (mb->un.varDmp.word_cnt > 2349 if (phba->sli_rev == LPFC_SLI_REV4) {
2159 sizeof(struct static_vport_info) - offset) 2350 byte_count = pmb->u.mqe.un.mb_words[5];
2160 mb->un.varDmp.word_cnt = 2351 mp = (struct lpfc_dmabuf *) pmb->context2;
2161 sizeof(struct static_vport_info) - offset; 2352 if (byte_count > sizeof(struct static_vport_info) -
2162 2353 offset)
2163 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 2354 byte_count = sizeof(struct static_vport_info)
2164 vport_buff + offset, 2355 - offset;
2165 mb->un.varDmp.word_cnt); 2356 memcpy(vport_buff + offset, mp->virt, byte_count);
2166 offset += mb->un.varDmp.word_cnt; 2357 offset += byte_count;
2358 } else {
2359 if (mb->un.varDmp.word_cnt >
2360 sizeof(struct static_vport_info) - offset)
2361 mb->un.varDmp.word_cnt =
2362 sizeof(struct static_vport_info)
2363 - offset;
2364 byte_count = mb->un.varDmp.word_cnt;
2365 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
2366 vport_buff + offset,
2367 byte_count);
2368
2369 offset += byte_count;
2370 }
2167 2371
2168 } while (mb->un.varDmp.word_cnt && 2372 } while (byte_count &&
2169 offset < sizeof(struct static_vport_info)); 2373 offset < sizeof(struct static_vport_info));
2170 2374
2171 2375
@@ -2198,7 +2402,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2198 if (!new_fc_vport) { 2402 if (!new_fc_vport) {
2199 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2403 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2200 "0546 lpfc_create_static_vport failed to" 2404 "0546 lpfc_create_static_vport failed to"
2201 " create vport \n"); 2405 " create vport\n");
2202 continue; 2406 continue;
2203 } 2407 }
2204 2408
@@ -2207,16 +2411,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
2207 } 2411 }
2208 2412
2209out: 2413out:
2210 /*
2211 * If this is timed out command, setting NULL to context2 tell SLI
2212 * layer not to use this buffer.
2213 */
2214 spin_lock_irq(&phba->hbalock);
2215 pmb->context2 = NULL;
2216 spin_unlock_irq(&phba->hbalock);
2217 kfree(vport_info); 2414 kfree(vport_info);
2218 if (rc != MBX_TIMEOUT) 2415 if (rc != MBX_TIMEOUT) {
2416 if (pmb->context2) {
2417 mp = (struct lpfc_dmabuf *) pmb->context2;
2418 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2419 kfree(mp);
2420 }
2219 mempool_free(pmb, phba->mbox_mem_pool); 2421 mempool_free(pmb, phba->mbox_mem_pool);
2422 }
2220 2423
2221 return; 2424 return;
2222} 2425}
@@ -4360,7 +4563,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba,
4360 fcoe_param_hdr = (struct lpfc_fip_param_hdr *) 4563 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
4361 buff; 4564 buff;
4362 fcoe_param = (struct lpfc_fcoe_params *) 4565 fcoe_param = (struct lpfc_fcoe_params *)
4363 buff + sizeof(struct lpfc_fip_param_hdr); 4566 (buff + sizeof(struct lpfc_fip_param_hdr));
4364 4567
4365 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || 4568 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
4366 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) 4569 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 8a3a026667e4..ccb26724dc53 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2496,8 +2496,8 @@ typedef struct {
2496#define DMP_VPORT_REGION_SIZE 0x200 2496#define DMP_VPORT_REGION_SIZE 0x200
2497#define DMP_MBOX_OFFSET_WORD 0x5 2497#define DMP_MBOX_OFFSET_WORD 0x5
2498 2498
2499#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ 2499#define DMP_REGION_23 0x17 /* fcoe param and port state region */
2500#define DMP_FCOEPARAM_RGN_SIZE 0x400 2500#define DMP_RGN23_SIZE 0x400
2501 2501
2502#define WAKE_UP_PARMS_REGION_ID 4 2502#define WAKE_UP_PARMS_REGION_ID 4
2503#define WAKE_UP_PARMS_WORD_SIZE 15 2503#define WAKE_UP_PARMS_WORD_SIZE 15
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2995d128f07f..3689eee04535 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,6 +52,31 @@ struct dma_address {
52 uint32_t addr_hi; 52 uint32_t addr_hi;
53}; 53};
54 54
55#define LPFC_SLIREV_CONF_WORD 0x58
56struct lpfc_sli_intf {
57 uint32_t word0;
58#define lpfc_sli_intf_iftype_MASK 0x00000007
59#define lpfc_sli_intf_iftype_SHIFT 0
60#define lpfc_sli_intf_iftype_WORD word0
61#define lpfc_sli_intf_rev_MASK 0x0000000f
62#define lpfc_sli_intf_rev_SHIFT 4
63#define lpfc_sli_intf_rev_WORD word0
64#define LPFC_SLIREV_CONF_SLI4 4
65#define lpfc_sli_intf_family_MASK 0x000000ff
66#define lpfc_sli_intf_family_SHIFT 8
67#define lpfc_sli_intf_family_WORD word0
68#define lpfc_sli_intf_feat1_MASK 0x000000ff
69#define lpfc_sli_intf_feat1_SHIFT 16
70#define lpfc_sli_intf_feat1_WORD word0
71#define lpfc_sli_intf_feat2_MASK 0x0000001f
72#define lpfc_sli_intf_feat2_SHIFT 24
73#define lpfc_sli_intf_feat2_WORD word0
74#define lpfc_sli_intf_valid_MASK 0x00000007
75#define lpfc_sli_intf_valid_SHIFT 29
76#define lpfc_sli_intf_valid_WORD word0
77#define LPFC_SLI_INTF_VALID 6
78};
79
55#define LPFC_SLI4_BAR0 1 80#define LPFC_SLI4_BAR0 1
56#define LPFC_SLI4_BAR1 2 81#define LPFC_SLI4_BAR1 2
57#define LPFC_SLI4_BAR2 4 82#define LPFC_SLI4_BAR2 4
@@ -1181,6 +1206,32 @@ struct fcf_record {
1181#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF 1206#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF
1182#define lpfc_fcf_record_fcf_state_WORD word8 1207#define lpfc_fcf_record_fcf_state_WORD word8
1183 uint8_t vlan_bitmap[512]; 1208 uint8_t vlan_bitmap[512];
1209 uint32_t word137;
1210#define lpfc_fcf_record_switch_name_0_SHIFT 0
1211#define lpfc_fcf_record_switch_name_0_MASK 0x000000FF
1212#define lpfc_fcf_record_switch_name_0_WORD word137
1213#define lpfc_fcf_record_switch_name_1_SHIFT 8
1214#define lpfc_fcf_record_switch_name_1_MASK 0x000000FF
1215#define lpfc_fcf_record_switch_name_1_WORD word137
1216#define lpfc_fcf_record_switch_name_2_SHIFT 16
1217#define lpfc_fcf_record_switch_name_2_MASK 0x000000FF
1218#define lpfc_fcf_record_switch_name_2_WORD word137
1219#define lpfc_fcf_record_switch_name_3_SHIFT 24
1220#define lpfc_fcf_record_switch_name_3_MASK 0x000000FF
1221#define lpfc_fcf_record_switch_name_3_WORD word137
1222 uint32_t word138;
1223#define lpfc_fcf_record_switch_name_4_SHIFT 0
1224#define lpfc_fcf_record_switch_name_4_MASK 0x000000FF
1225#define lpfc_fcf_record_switch_name_4_WORD word138
1226#define lpfc_fcf_record_switch_name_5_SHIFT 8
1227#define lpfc_fcf_record_switch_name_5_MASK 0x000000FF
1228#define lpfc_fcf_record_switch_name_5_WORD word138
1229#define lpfc_fcf_record_switch_name_6_SHIFT 16
1230#define lpfc_fcf_record_switch_name_6_MASK 0x000000FF
1231#define lpfc_fcf_record_switch_name_6_WORD word138
1232#define lpfc_fcf_record_switch_name_7_SHIFT 24
1233#define lpfc_fcf_record_switch_name_7_MASK 0x000000FF
1234#define lpfc_fcf_record_switch_name_7_WORD word138
1184}; 1235};
1185 1236
1186struct lpfc_mbx_read_fcf_tbl { 1237struct lpfc_mbx_read_fcf_tbl {
@@ -1385,20 +1436,17 @@ struct lpfc_mbx_unreg_vfi {
1385 1436
1386struct lpfc_mbx_resume_rpi { 1437struct lpfc_mbx_resume_rpi {
1387 uint32_t word1; 1438 uint32_t word1;
1388#define lpfc_resume_rpi_rpi_SHIFT 0 1439#define lpfc_resume_rpi_index_SHIFT 0
1389#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF 1440#define lpfc_resume_rpi_index_MASK 0x0000FFFF
1390#define lpfc_resume_rpi_rpi_WORD word1 1441#define lpfc_resume_rpi_index_WORD word1
1442#define lpfc_resume_rpi_ii_SHIFT 30
1443#define lpfc_resume_rpi_ii_MASK 0x00000003
1444#define lpfc_resume_rpi_ii_WORD word1
1445#define RESUME_INDEX_RPI 0
1446#define RESUME_INDEX_VPI 1
1447#define RESUME_INDEX_VFI 2
1448#define RESUME_INDEX_FCFI 3
1391 uint32_t event_tag; 1449 uint32_t event_tag;
1392 uint32_t word3_rsvd;
1393 uint32_t word4_rsvd;
1394 uint32_t word5_rsvd;
1395 uint32_t word6;
1396#define lpfc_resume_rpi_vpi_SHIFT 0
1397#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF
1398#define lpfc_resume_rpi_vpi_WORD word6
1399#define lpfc_resume_rpi_vfi_SHIFT 16
1400#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF
1401#define lpfc_resume_rpi_vfi_WORD word6
1402}; 1450};
1403 1451
1404#define REG_FCF_INVALID_QID 0xFFFF 1452#define REG_FCF_INVALID_QID 0xFFFF
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fc67cc65c63b..562d8cee874b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -211,7 +211,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
211 goto out_free_mbox; 211 goto out_free_mbox;
212 212
213 do { 213 do {
214 lpfc_dump_mem(phba, pmb, offset); 214 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
215 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 215 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
216 216
217 if (rc != MBX_SUCCESS) { 217 if (rc != MBX_SUCCESS) {
@@ -425,6 +425,9 @@ lpfc_config_port_post(struct lpfc_hba *phba)
425 return -EIO; 425 return -EIO;
426 } 426 }
427 427
428 /* Check if the port is disabled */
429 lpfc_sli_read_link_ste(phba);
430
428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 431 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 432 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
430 phba->cfg_hba_queue_depth = 433 phba->cfg_hba_queue_depth =
@@ -524,27 +527,46 @@ lpfc_config_port_post(struct lpfc_hba *phba)
524 /* Set up error attention (ERATT) polling timer */ 527 /* Set up error attention (ERATT) polling timer */
525 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 528 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
526 529
527 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 530 if (phba->hba_flag & LINK_DISABLED) {
528 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 531 lpfc_printf_log(phba,
529 lpfc_set_loopback_flag(phba); 532 KERN_ERR, LOG_INIT,
530 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 533 "2598 Adapter Link is disabled.\n");
531 if (rc != MBX_SUCCESS) { 534 lpfc_down_link(phba, pmb);
532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 535 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
536 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
537 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
538 lpfc_printf_log(phba,
539 KERN_ERR, LOG_INIT,
540 "2599 Adapter failed to issue DOWN_LINK"
541 " mbox command rc 0x%x\n", rc);
542
543 mempool_free(pmb, phba->mbox_mem_pool);
544 return -EIO;
545 }
546 } else {
547 lpfc_init_link(phba, pmb, phba->cfg_topology,
548 phba->cfg_link_speed);
549 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
550 lpfc_set_loopback_flag(phba);
551 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
552 if (rc != MBX_SUCCESS) {
553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
533 "0454 Adapter failed to init, mbxCmd x%x " 554 "0454 Adapter failed to init, mbxCmd x%x "
534 "INIT_LINK, mbxStatus x%x\n", 555 "INIT_LINK, mbxStatus x%x\n",
535 mb->mbxCommand, mb->mbxStatus); 556 mb->mbxCommand, mb->mbxStatus);
536 557
537 /* Clear all interrupt enable conditions */ 558 /* Clear all interrupt enable conditions */
538 writel(0, phba->HCregaddr); 559 writel(0, phba->HCregaddr);
539 readl(phba->HCregaddr); /* flush */ 560 readl(phba->HCregaddr); /* flush */
540 /* Clear all pending interrupts */ 561 /* Clear all pending interrupts */
541 writel(0xffffffff, phba->HAregaddr); 562 writel(0xffffffff, phba->HAregaddr);
542 readl(phba->HAregaddr); /* flush */ 563 readl(phba->HAregaddr); /* flush */
543 564
544 phba->link_state = LPFC_HBA_ERROR; 565 phba->link_state = LPFC_HBA_ERROR;
545 if (rc != MBX_BUSY) 566 if (rc != MBX_BUSY)
546 mempool_free(pmb, phba->mbox_mem_pool); 567 mempool_free(pmb, phba->mbox_mem_pool);
547 return -EIO; 568 return -EIO;
569 }
548 } 570 }
549 /* MBOX buffer will be freed in mbox compl */ 571 /* MBOX buffer will be freed in mbox compl */
550 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 572 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -558,7 +580,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
558 KERN_ERR, 580 KERN_ERR,
559 LOG_INIT, 581 LOG_INIT,
560 "0456 Adapter failed to issue " 582 "0456 Adapter failed to issue "
561 "ASYNCEVT_ENABLE mbox status x%x \n.", 583 "ASYNCEVT_ENABLE mbox status x%x\n",
562 rc); 584 rc);
563 mempool_free(pmb, phba->mbox_mem_pool); 585 mempool_free(pmb, phba->mbox_mem_pool);
564 } 586 }
@@ -572,7 +594,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
572 594
573 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 595 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
575 "to get Option ROM version status x%x\n.", rc); 597 "to get Option ROM version status x%x\n", rc);
576 mempool_free(pmb, phba->mbox_mem_pool); 598 mempool_free(pmb, phba->mbox_mem_pool);
577 } 599 }
578 600
@@ -2133,6 +2155,8 @@ lpfc_online(struct lpfc_hba *phba)
2133 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2155 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2134 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2156 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2135 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2157 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2158 if (phba->sli_rev == LPFC_SLI_REV4)
2159 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2136 spin_unlock_irq(shost->host_lock); 2160 spin_unlock_irq(shost->host_lock);
2137 } 2161 }
2138 lpfc_destroy_vport_work_array(phba, vports); 2162 lpfc_destroy_vport_work_array(phba, vports);
@@ -2807,6 +2831,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
2807 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 2831 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
2808 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) 2832 if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
2809 return; 2833 return;
2834 phba->fcoe_eventtag = acqe_link->event_tag;
2810 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2835 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2811 if (!pmb) { 2836 if (!pmb) {
2812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2837 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -2894,18 +2919,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2894 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); 2919 uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
2895 int rc; 2920 int rc;
2896 2921
2922 phba->fcoe_eventtag = acqe_fcoe->event_tag;
2897 switch (event_type) { 2923 switch (event_type) {
2898 case LPFC_FCOE_EVENT_TYPE_NEW_FCF: 2924 case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
2899 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2925 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2900 "2546 New FCF found index 0x%x tag 0x%x \n", 2926 "2546 New FCF found index 0x%x tag 0x%x\n",
2901 acqe_fcoe->fcf_index, 2927 acqe_fcoe->fcf_index,
2902 acqe_fcoe->event_tag); 2928 acqe_fcoe->event_tag);
2903 /* 2929 /*
2904 * If the current FCF is in discovered state, 2930 * If the current FCF is in discovered state, or
2905 * do nothing. 2931 * FCF discovery is in progress do nothing.
2906 */ 2932 */
2907 spin_lock_irq(&phba->hbalock); 2933 spin_lock_irq(&phba->hbalock);
2908 if (phba->fcf.fcf_flag & FCF_DISCOVERED) { 2934 if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
2935 (phba->hba_flag & FCF_DISC_INPROGRESS)) {
2909 spin_unlock_irq(&phba->hbalock); 2936 spin_unlock_irq(&phba->hbalock);
2910 break; 2937 break;
2911 } 2938 }
@@ -2922,7 +2949,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2922 2949
2923 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: 2950 case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
2924 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2951 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2925 "2548 FCF Table full count 0x%x tag 0x%x \n", 2952 "2548 FCF Table full count 0x%x tag 0x%x\n",
2926 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), 2953 bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
2927 acqe_fcoe->event_tag); 2954 acqe_fcoe->event_tag);
2928 break; 2955 break;
@@ -2930,7 +2957,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
2930 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: 2957 case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
2931 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2958 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2932 "2549 FCF disconnected fron network index 0x%x" 2959 "2549 FCF disconnected fron network index 0x%x"
2933 " tag 0x%x \n", acqe_fcoe->fcf_index, 2960 " tag 0x%x\n", acqe_fcoe->fcf_index,
2934 acqe_fcoe->event_tag); 2961 acqe_fcoe->event_tag);
2935 /* If the event is not for currently used fcf do nothing */ 2962 /* If the event is not for currently used fcf do nothing */
2936 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) 2963 if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
@@ -4130,8 +4157,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4130 /* Allocate memory for HBA structure */ 4157 /* Allocate memory for HBA structure */
4131 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 4158 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
4132 if (!phba) { 4159 if (!phba) {
4133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4160 dev_err(&pdev->dev, "failed to allocate hba struct\n");
4134 "1417 Failed to allocate hba struct.\n");
4135 return NULL; 4161 return NULL;
4136 } 4162 }
4137 4163
@@ -4145,6 +4171,9 @@ lpfc_hba_alloc(struct pci_dev *pdev)
4145 return NULL; 4171 return NULL;
4146 } 4172 }
4147 4173
4174 mutex_init(&phba->ct_event_mutex);
4175 INIT_LIST_HEAD(&phba->ct_ev_waiters);
4176
4148 return phba; 4177 return phba;
4149} 4178}
4150 4179
@@ -4489,23 +4518,6 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4489 if (!phba->sli4_hba.STAregaddr) 4518 if (!phba->sli4_hba.STAregaddr)
4490 return -ENODEV; 4519 return -ENODEV;
4491 4520
4492 /* With uncoverable error, log the error message and return error */
4493 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4494 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4495 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4496 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4497 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4498 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4499 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4500 "1422 HBA Unrecoverable error: "
4501 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4502 "online0_reg=0x%x, online1_reg=0x%x\n",
4503 uerrlo_reg.word0, uerrhi_reg.word0,
4504 onlnreg0, onlnreg1);
4505 }
4506 return -ENODEV;
4507 }
4508
4509 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 4521 /* Wait up to 30 seconds for the SLI Port POST done and ready */
4510 for (i = 0; i < 3000; i++) { 4522 for (i = 0; i < 3000; i++) {
4511 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); 4523 sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
@@ -4545,6 +4557,23 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
4545 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), 4557 bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
4546 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); 4558 bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
4547 4559
4560 /* With uncoverable error, log the error message and return error */
4561 onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
4562 onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
4563 if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
4564 uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
4565 uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
4566 if (uerrlo_reg.word0 || uerrhi_reg.word0) {
4567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4568 "1422 HBA Unrecoverable error: "
4569 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
4570 "online0_reg=0x%x, online1_reg=0x%x\n",
4571 uerrlo_reg.word0, uerrhi_reg.word0,
4572 onlnreg0, onlnreg1);
4573 }
4574 return -ENODEV;
4575 }
4576
4548 return port_error; 4577 return port_error;
4549} 4578}
4550 4579
@@ -7347,6 +7376,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
7347 /* Perform post initialization setup */ 7376 /* Perform post initialization setup */
7348 lpfc_post_init_setup(phba); 7377 lpfc_post_init_setup(phba);
7349 7378
7379 /* Check if there are static vports to be created. */
7380 lpfc_create_static_vport(phba);
7381
7350 return 0; 7382 return 0;
7351 7383
7352out_disable_intr: 7384out_disable_intr:
@@ -7636,19 +7668,17 @@ static int __devinit
7636lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 7668lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7637{ 7669{
7638 int rc; 7670 int rc;
7639 uint16_t dev_id; 7671 struct lpfc_sli_intf intf;
7640 7672
7641 if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) 7673 if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
7642 return -ENODEV; 7674 return -ENODEV;
7643 7675
7644 switch (dev_id) { 7676 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
7645 case PCI_DEVICE_ID_TIGERSHARK: 7677 (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
7646 rc = lpfc_pci_probe_one_s4(pdev, pid); 7678 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break; 7679 else
7648 default:
7649 rc = lpfc_pci_probe_one_s3(pdev, pid); 7680 rc = lpfc_pci_probe_one_s3(pdev, pid);
7650 break; 7681
7651 }
7652 return rc; 7682 return rc;
7653} 7683}
7654 7684
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 3423571dd1b3..1ab405902a18 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -52,48 +52,85 @@
52 * This routine prepares the mailbox command for dumping list of static 52 * This routine prepares the mailbox command for dumping list of static
53 * vports to be created. 53 * vports to be created.
54 **/ 54 **/
55void 55int
56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, 56lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
57 uint16_t offset) 57 uint16_t offset)
58{ 58{
59 MAILBOX_t *mb; 59 MAILBOX_t *mb;
60 void *ctx; 60 struct lpfc_dmabuf *mp;
61 61
62 mb = &pmb->u.mb; 62 mb = &pmb->u.mb;
63 ctx = pmb->context2;
64 63
65 /* Setup to dump vport info region */ 64 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 65 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY; 66 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.cv = 1;
69 mb->un.varDmp.type = DMP_NV_PARAMS; 67 mb->un.varDmp.type = DMP_NV_PARAMS;
70 mb->un.varDmp.entry_index = offset; 68 mb->un.varDmp.entry_index = offset;
71 mb->un.varDmp.region_id = DMP_REGION_VPORT; 69 mb->un.varDmp.region_id = DMP_REGION_VPORT;
72 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
73 mb->un.varDmp.co = 0;
74 mb->un.varDmp.resp_offset = 0;
75 pmb->context2 = ctx;
76 mb->mbxOwner = OWN_HOST; 70 mb->mbxOwner = OWN_HOST;
77 71
78 return; 72 /* For SLI3 HBAs data is embedded in mailbox */
73 if (phba->sli_rev != LPFC_SLI_REV4) {
74 mb->un.varDmp.cv = 1;
75 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
76 return 0;
77 }
78
79 /* For SLI4 HBAs driver need to allocate memory */
80 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
81 if (mp)
82 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
83
84 if (!mp || !mp->virt) {
85 kfree(mp);
86 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
87 "2605 lpfc_dump_static_vport: memory"
88 " allocation failed\n");
89 return 1;
90 }
91 memset(mp->virt, 0, LPFC_BPL_SIZE);
92 INIT_LIST_HEAD(&mp->list);
93 /* save address for completion */
94 pmb->context2 = (uint8_t *) mp;
95 mb->un.varWords[3] = putPaddrLow(mp->phys);
96 mb->un.varWords[4] = putPaddrHigh(mp->phys);
97 mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
98
99 return 0;
100}
101
102/**
103 * lpfc_down_link - Bring down HBAs link.
104 * @phba: pointer to lpfc hba data structure.
105 * @pmb: pointer to the driver internal queue element for mailbox command.
106 *
107 * This routine prepares a mailbox command to bring down HBA link.
108 **/
109void
110lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
111{
112 MAILBOX_t *mb;
113 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
114 mb = &pmb->u.mb;
115 mb->mbxCommand = MBX_DOWN_LINK;
116 mb->mbxOwner = OWN_HOST;
79} 117}
80 118
81/** 119/**
82 * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory 120 * lpfc_dump_mem - Prepare a mailbox command for reading a region.
83 * @phba: pointer to lpfc hba data structure. 121 * @phba: pointer to lpfc hba data structure.
84 * @pmb: pointer to the driver internal queue element for mailbox command. 122 * @pmb: pointer to the driver internal queue element for mailbox command.
85 * @offset: offset for dumping VPD memory mailbox command. 123 * @offset: offset into the region.
124 * @region_id: config region id.
86 * 125 *
87 * The dump mailbox command provides a method for the device driver to obtain 126 * The dump mailbox command provides a method for the device driver to obtain
88 * various types of information from the HBA device. 127 * various types of information from the HBA device.
89 * 128 *
90 * This routine prepares the mailbox command for dumping HBA Vital Product 129 * This routine prepares the mailbox command for dumping HBA's config region.
91 * Data (VPD) memory. This mailbox command is to be used for retrieving a
92 * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
93 * offset specified by the offset parameter.
94 **/ 130 **/
95void 131void
96lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) 132lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
133 uint16_t region_id)
97{ 134{
98 MAILBOX_t *mb; 135 MAILBOX_t *mb;
99 void *ctx; 136 void *ctx;
@@ -107,7 +144,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
107 mb->un.varDmp.cv = 1; 144 mb->un.varDmp.cv = 1;
108 mb->un.varDmp.type = DMP_NV_PARAMS; 145 mb->un.varDmp.type = DMP_NV_PARAMS;
109 mb->un.varDmp.entry_index = offset; 146 mb->un.varDmp.entry_index = offset;
110 mb->un.varDmp.region_id = DMP_REGION_VPD; 147 mb->un.varDmp.region_id = region_id;
111 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); 148 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
112 mb->un.varDmp.co = 0; 149 mb->un.varDmp.co = 0;
113 mb->un.varDmp.resp_offset = 0; 150 mb->un.varDmp.resp_offset = 0;
@@ -1789,6 +1826,7 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1789 1826
1790/** 1827/**
1791 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command 1828 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
1829 * @phba: pointer to the hba structure to init the VPI for.
1792 * @mbox: pointer to lpfc mbox command to initialize. 1830 * @mbox: pointer to lpfc mbox command to initialize.
1793 * @vpi: VPI to be initialized. 1831 * @vpi: VPI to be initialized.
1794 * 1832 *
@@ -1799,11 +1837,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
1799 * successful virtual NPort login. 1837 * successful virtual NPort login.
1800 **/ 1838 **/
1801void 1839void
1802lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi) 1840lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
1803{ 1841{
1804 memset(mbox, 0, sizeof(*mbox)); 1842 memset(mbox, 0, sizeof(*mbox));
1805 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); 1843 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
1806 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi); 1844 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
1845 vpi + phba->vpi_base);
1846 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
1847 phba->pport->vfi + phba->vfi_base);
1807} 1848}
1808 1849
1809/** 1850/**
@@ -1852,7 +1893,7 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1852 /* dump_fcoe_param failed to allocate memory */ 1893 /* dump_fcoe_param failed to allocate memory */
1853 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 1894 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1854 "2569 lpfc_dump_fcoe_param: memory" 1895 "2569 lpfc_dump_fcoe_param: memory"
1855 " allocation failed \n"); 1896 " allocation failed\n");
1856 return 1; 1897 return 1;
1857 } 1898 }
1858 1899
@@ -1864,8 +1905,8 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
1864 1905
1865 mb->mbxCommand = MBX_DUMP_MEMORY; 1906 mb->mbxCommand = MBX_DUMP_MEMORY;
1866 mb->un.varDmp.type = DMP_NV_PARAMS; 1907 mb->un.varDmp.type = DMP_NV_PARAMS;
1867 mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM; 1908 mb->un.varDmp.region_id = DMP_REGION_23;
1868 mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE; 1909 mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
1869 mb->un.varWords[3] = putPaddrLow(mp->phys); 1910 mb->un.varWords[3] = putPaddrLow(mp->phys);
1870 mb->un.varWords[4] = putPaddrHigh(mp->phys); 1911 mb->un.varWords[4] = putPaddrHigh(mp->phys);
1871 return 0; 1912 return 0;
@@ -1938,9 +1979,7 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
1938 memset(mbox, 0, sizeof(*mbox)); 1979 memset(mbox, 0, sizeof(*mbox));
1939 resume_rpi = &mbox->u.mqe.un.resume_rpi; 1980 resume_rpi = &mbox->u.mqe.un.resume_rpi;
1940 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); 1981 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
1941 bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi); 1982 bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
1942 bf_set(lpfc_resume_rpi_vpi, resume_rpi, 1983 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
1943 ndlp->vport->vpi + ndlp->vport->phba->vpi_base); 1984 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
1944 bf_set(lpfc_resume_rpi_vfi, resume_rpi,
1945 ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
1946} 1985}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index e198c917c13e..a1b6db6016da 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -110,17 +110,28 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
110 sizeof(struct lpfc_nodelist)); 110 sizeof(struct lpfc_nodelist));
111 if (!phba->nlp_mem_pool) 111 if (!phba->nlp_mem_pool)
112 goto fail_free_mbox_pool; 112 goto fail_free_mbox_pool;
113 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", 113
114 if (phba->sli_rev == LPFC_SLI_REV4) {
115 phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
114 phba->pcidev, 116 phba->pcidev,
115 LPFC_HDR_BUF_SIZE, align, 0); 117 LPFC_HDR_BUF_SIZE, align, 0);
116 if (!phba->lpfc_hrb_pool) 118 if (!phba->lpfc_hrb_pool)
117 goto fail_free_nlp_mem_pool; 119 goto fail_free_nlp_mem_pool;
118 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", 120
121 phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
119 phba->pcidev, 122 phba->pcidev,
120 LPFC_DATA_BUF_SIZE, align, 0); 123 LPFC_DATA_BUF_SIZE, align, 0);
121 if (!phba->lpfc_drb_pool) 124 if (!phba->lpfc_drb_pool)
122 goto fail_free_hbq_pool; 125 goto fail_free_hrb_pool;
123 126 phba->lpfc_hbq_pool = NULL;
127 } else {
128 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
129 phba->pcidev, LPFC_BPL_SIZE, align, 0);
130 if (!phba->lpfc_hbq_pool)
131 goto fail_free_nlp_mem_pool;
132 phba->lpfc_hrb_pool = NULL;
133 phba->lpfc_drb_pool = NULL;
134 }
124 /* vpi zero is reserved for the physical port so add 1 to max */ 135 /* vpi zero is reserved for the physical port so add 1 to max */
125 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; 136 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
126 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); 137 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
@@ -132,7 +143,7 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
132 fail_free_dbq_pool: 143 fail_free_dbq_pool:
133 pci_pool_destroy(phba->lpfc_drb_pool); 144 pci_pool_destroy(phba->lpfc_drb_pool);
134 phba->lpfc_drb_pool = NULL; 145 phba->lpfc_drb_pool = NULL;
135 fail_free_hbq_pool: 146 fail_free_hrb_pool:
136 pci_pool_destroy(phba->lpfc_hrb_pool); 147 pci_pool_destroy(phba->lpfc_hrb_pool);
137 phba->lpfc_hrb_pool = NULL; 148 phba->lpfc_hrb_pool = NULL;
138 fail_free_nlp_mem_pool: 149 fail_free_nlp_mem_pool:
@@ -176,11 +187,17 @@ lpfc_mem_free(struct lpfc_hba *phba)
176 187
177 /* Free HBQ pools */ 188 /* Free HBQ pools */
178 lpfc_sli_hbqbuf_free_all(phba); 189 lpfc_sli_hbqbuf_free_all(phba);
179 pci_pool_destroy(phba->lpfc_drb_pool); 190 if (phba->lpfc_drb_pool)
191 pci_pool_destroy(phba->lpfc_drb_pool);
180 phba->lpfc_drb_pool = NULL; 192 phba->lpfc_drb_pool = NULL;
181 pci_pool_destroy(phba->lpfc_hrb_pool); 193 if (phba->lpfc_hrb_pool)
194 pci_pool_destroy(phba->lpfc_hrb_pool);
182 phba->lpfc_hrb_pool = NULL; 195 phba->lpfc_hrb_pool = NULL;
183 196
197 if (phba->lpfc_hbq_pool)
198 pci_pool_destroy(phba->lpfc_hbq_pool);
199 phba->lpfc_hbq_pool = NULL;
200
184 /* Free NLP memory pool */ 201 /* Free NLP memory pool */
185 mempool_destroy(phba->nlp_mem_pool); 202 mempool_destroy(phba->nlp_mem_pool);
186 phba->nlp_mem_pool = NULL; 203 phba->nlp_mem_pool = NULL;
@@ -380,7 +397,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
380 if (!hbqbp) 397 if (!hbqbp)
381 return NULL; 398 return NULL;
382 399
383 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, 400 hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
384 &hbqbp->dbuf.phys); 401 &hbqbp->dbuf.phys);
385 if (!hbqbp->dbuf.virt) { 402 if (!hbqbp->dbuf.virt) {
386 kfree(hbqbp); 403 kfree(hbqbp);
@@ -405,7 +422,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
405void 422void
406lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) 423lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
407{ 424{
408 pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); 425 pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
409 kfree(hbqbp); 426 kfree(hbqbp);
410 return; 427 return;
411} 428}
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index 27d1a88a98fe..d655ed3eebef 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -177,3 +177,23 @@ struct temp_event {
177 uint32_t data; 177 uint32_t data;
178}; 178};
179 179
180/* bsg definitions */
181#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
182#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
183
184struct set_ct_event {
185 uint32_t command;
186 uint32_t ev_req_id;
187 uint32_t ev_reg_id;
188};
189
190struct get_ct_event {
191 uint32_t command;
192 uint32_t ev_reg_id;
193 uint32_t ev_req_id;
194};
195
196struct get_ct_event_reply {
197 uint32_t immed_data;
198 uint32_t type;
199};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index da59c4f0168f..61d089703806 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2142,7 +2142,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2142 } else if (resp_info & RESID_OVER) { 2142 } else if (resp_info & RESID_OVER) {
2143 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2143 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2144 "9028 FCP command x%x residual overrun error. " 2144 "9028 FCP command x%x residual overrun error. "
2145 "Data: x%x x%x \n", cmnd->cmnd[0], 2145 "Data: x%x x%x\n", cmnd->cmnd[0],
2146 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 2146 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
2147 host_status = DID_ERROR; 2147 host_status = DID_ERROR;
2148 2148
@@ -2843,7 +2843,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2843 dif_op_str[scsi_get_prot_op(cmnd)]); 2843 dif_op_str[scsi_get_prot_op(cmnd)]);
2844 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2844 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2845 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2845 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2846 "%02x %02x %02x %02x %02x \n", 2846 "%02x %02x %02x %02x %02x\n",
2847 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2847 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2848 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2848 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2849 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2849 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -2871,7 +2871,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2871 dif_op_str[scsi_get_prot_op(cmnd)]); 2871 dif_op_str[scsi_get_prot_op(cmnd)]);
2872 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2872 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2873 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x " 2873 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2874 "%02x %02x %02x %02x %02x \n", 2874 "%02x %02x %02x %02x %02x\n",
2875 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 2875 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2876 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 2876 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2877 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 2877 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -3584,6 +3584,7 @@ struct scsi_host_template lpfc_template = {
3584 .use_clustering = ENABLE_CLUSTERING, 3584 .use_clustering = ENABLE_CLUSTERING,
3585 .shost_attrs = lpfc_hba_attrs, 3585 .shost_attrs = lpfc_hba_attrs,
3586 .max_sectors = 0xFFFF, 3586 .max_sectors = 0xFFFF,
3587 .vendor_id = LPFC_NL_VENDOR_ID,
3587}; 3588};
3588 3589
3589struct scsi_host_template lpfc_vport_template = { 3590struct scsi_host_template lpfc_vport_template = {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index acc43b061ba1..43cbe336f1f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,7 +4139,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4139 return -EIO; 4139 return -EIO;
4140 } 4140 }
4141 data_length = mqe->un.mb_words[5]; 4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) { 4142 if (data_length > DMP_RGN23_SIZE) {
4143 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4143 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4144 kfree(mp); 4144 kfree(mp);
4145 return -EIO; 4145 return -EIO;
@@ -4304,7 +4304,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4304 */ 4304 */
4305 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 4305 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4306 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4306 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4307 "2570 Failed to read FCoE parameters \n"); 4307 "2570 Failed to read FCoE parameters\n");
4308 4308
4309 /* Issue READ_REV to collect vpd and FW information. */ 4309 /* Issue READ_REV to collect vpd and FW information. */
4310 vpd_size = PAGE_SIZE; 4310 vpd_size = PAGE_SIZE;
@@ -4522,12 +4522,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4522 lpfc_sli4_rb_setup(phba); 4522 lpfc_sli4_rb_setup(phba);
4523 4523
4524 /* Start the ELS watchdog timer */ 4524 /* Start the ELS watchdog timer */
4525 /* 4525 mod_timer(&vport->els_tmofunc,
4526 * The driver for SLI4 is not yet ready to process timeouts 4526 jiffies + HZ * (phba->fc_ratov * 2));
4527 * or interrupts. Once it is, the comment bars can be removed.
4528 */
4529 /* mod_timer(&vport->els_tmofunc,
4530 * jiffies + HZ * (phba->fc_ratov*2)); */
4531 4527
4532 /* Start heart beat timer */ 4528 /* Start heart beat timer */
4533 mod_timer(&phba->hb_tmofunc, 4529 mod_timer(&phba->hb_tmofunc,
@@ -4706,13 +4702,13 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
4706 4702
4707 spin_lock_irqsave(&phba->hbalock, drvr_flag); 4703 spin_lock_irqsave(&phba->hbalock, drvr_flag);
4708 if (!pmbox) { 4704 if (!pmbox) {
4705 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4709 /* processing mbox queue from intr_handler */ 4706 /* processing mbox queue from intr_handler */
4710 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 4707 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
4711 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4708 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
4712 return MBX_SUCCESS; 4709 return MBX_SUCCESS;
4713 } 4710 }
4714 processing_queue = 1; 4711 processing_queue = 1;
4715 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4716 pmbox = lpfc_mbox_get(phba); 4712 pmbox = lpfc_mbox_get(phba);
4717 if (!pmbox) { 4713 if (!pmbox) {
4718 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 4714 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -5279,6 +5275,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5279 unsigned long iflags; 5275 unsigned long iflags;
5280 int rc; 5276 int rc;
5281 5277
5278 rc = lpfc_mbox_dev_check(phba);
5279 if (unlikely(rc)) {
5280 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5281 "(%d):2544 Mailbox command x%x (x%x) "
5282 "cannot issue Data: x%x x%x\n",
5283 mboxq->vport ? mboxq->vport->vpi : 0,
5284 mboxq->u.mb.mbxCommand,
5285 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5286 psli->sli_flag, flag);
5287 goto out_not_finished;
5288 }
5289
5282 /* Detect polling mode and jump to a handler */ 5290 /* Detect polling mode and jump to a handler */
5283 if (!phba->sli4_hba.intr_enable) { 5291 if (!phba->sli4_hba.intr_enable) {
5284 if (flag == MBX_POLL) 5292 if (flag == MBX_POLL)
@@ -5338,17 +5346,6 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5338 psli->sli_flag, flag); 5346 psli->sli_flag, flag);
5339 goto out_not_finished; 5347 goto out_not_finished;
5340 } 5348 }
5341 rc = lpfc_mbox_dev_check(phba);
5342 if (unlikely(rc)) {
5343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5344 "(%d):2544 Mailbox command x%x (x%x) "
5345 "cannot issue Data: x%x x%x\n",
5346 mboxq->vport ? mboxq->vport->vpi : 0,
5347 mboxq->u.mb.mbxCommand,
5348 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5349 psli->sli_flag, flag);
5350 goto out_not_finished;
5351 }
5352 5349
5353 /* Put the mailbox command to the driver internal FIFO */ 5350 /* Put the mailbox command to the driver internal FIFO */
5354 psli->slistat.mbox_busy++; 5351 psli->slistat.mbox_busy++;
@@ -5817,19 +5814,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
5817/** 5814/**
5818 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 5815 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
5819 * @phba: Pointer to HBA context object. 5816 * @phba: Pointer to HBA context object.
5820 * @piocb: Pointer to command iocb.
5821 * 5817 *
5822 * This routine performs a round robin SCSI command to SLI4 FCP WQ index 5818 * This routine performs a round robin SCSI command to SLI4 FCP WQ index
5823 * distribution. 5819 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
5820 * held.
5824 * 5821 *
5825 * Return: index into SLI4 fast-path FCP queue index. 5822 * Return: index into SLI4 fast-path FCP queue index.
5826 **/ 5823 **/
5827static uint32_t 5824static uint32_t
5828lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 5825lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
5829{ 5826{
5830 static uint32_t fcp_qidx; 5827 ++phba->fcp_qidx;
5828 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
5829 phba->fcp_qidx = 0;
5831 5830
5832 return fcp_qidx++ % phba->cfg_fcp_wq_count; 5831 return phba->fcp_qidx;
5833} 5832}
5834 5833
5835/** 5834/**
@@ -6156,7 +6155,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
6156 return IOCB_ERROR; 6155 return IOCB_ERROR;
6157 6156
6158 if (piocb->iocb_flag & LPFC_IO_FCP) { 6157 if (piocb->iocb_flag & LPFC_IO_FCP) {
6159 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb); 6158 fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
6160 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) 6159 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
6161 return IOCB_ERROR; 6160 return IOCB_ERROR;
6162 } else { 6161 } else {
@@ -6327,7 +6326,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
6327 KERN_ERR, 6326 KERN_ERR,
6328 LOG_SLI, 6327 LOG_SLI,
6329 "0346 Ring %d handler: unexpected ASYNC_STATUS" 6328 "0346 Ring %d handler: unexpected ASYNC_STATUS"
6330 " evt_code 0x%x \n" 6329 " evt_code 0x%x\n"
6331 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 6330 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
6332 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 6331 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
6333 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 6332 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
@@ -6790,6 +6789,33 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
6790 6789
6791 6790
6792/** 6791/**
6792 * lpfc_sli_bemem_bcopy - SLI memory copy function
6793 * @srcp: Source memory pointer.
6794 * @destp: Destination memory pointer.
6795 * @cnt: Number of bytes required to be copied.
6796 *
6797 * This function is used for copying data from a data structure
6798 * with big endian representation to local endianness.
6799 * This function can be called with or without lock.
6800 **/
6801void
6802lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
6803{
6804 uint32_t *src = srcp;
6805 uint32_t *dest = destp;
6806 uint32_t ldata;
6807 int i;
6808
6809 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
6810 ldata = *src;
6811 ldata = be32_to_cpu(ldata);
6812 *dest = ldata;
6813 src++;
6814 dest++;
6815 }
6816}
6817
6818/**
6793 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 6819 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
6794 * @phba: Pointer to HBA context object. 6820 * @phba: Pointer to HBA context object.
6795 * @pring: Pointer to driver SLI ring object. 6821 * @pring: Pointer to driver SLI ring object.
@@ -7678,12 +7704,6 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7678 "online0_reg=0x%x, online1_reg=0x%x\n", 7704 "online0_reg=0x%x, online1_reg=0x%x\n",
7679 uerr_sta_lo, uerr_sta_hi, 7705 uerr_sta_lo, uerr_sta_hi,
7680 onlnreg0, onlnreg1); 7706 onlnreg0, onlnreg1);
7681 /* TEMP: as the driver error recover logic is not
7682 * fully developed, we just log the error message
7683 * and the device error attention action is now
7684 * temporarily disabled.
7685 */
7686 return 0;
7687 phba->work_status[0] = uerr_sta_lo; 7707 phba->work_status[0] = uerr_sta_lo;
7688 phba->work_status[1] = uerr_sta_hi; 7708 phba->work_status[1] = uerr_sta_hi;
7689 /* Set the driver HA work bitmap */ 7709 /* Set the driver HA work bitmap */
@@ -9499,8 +9519,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9499 eq->host_index = 0; 9519 eq->host_index = 0;
9500 eq->hba_index = 0; 9520 eq->hba_index = 0;
9501 9521
9502 if (rc != MBX_TIMEOUT) 9522 mempool_free(mbox, phba->mbox_mem_pool);
9503 mempool_free(mbox, phba->mbox_mem_pool);
9504 return status; 9523 return status;
9505} 9524}
9506 9525
@@ -9604,10 +9623,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9604 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 9623 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9605 cq->host_index = 0; 9624 cq->host_index = 0;
9606 cq->hba_index = 0; 9625 cq->hba_index = 0;
9607out:
9608 9626
9609 if (rc != MBX_TIMEOUT) 9627out:
9610 mempool_free(mbox, phba->mbox_mem_pool); 9628 mempool_free(mbox, phba->mbox_mem_pool);
9611 return status; 9629 return status;
9612} 9630}
9613 9631
@@ -9712,8 +9730,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9712 /* link the mq onto the parent cq child list */ 9730 /* link the mq onto the parent cq child list */
9713 list_add_tail(&mq->list, &cq->child_list); 9731 list_add_tail(&mq->list, &cq->child_list);
9714out: 9732out:
9715 if (rc != MBX_TIMEOUT) 9733 mempool_free(mbox, phba->mbox_mem_pool);
9716 mempool_free(mbox, phba->mbox_mem_pool);
9717 return status; 9734 return status;
9718} 9735}
9719 9736
@@ -9795,8 +9812,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9795 /* link the wq onto the parent cq child list */ 9812 /* link the wq onto the parent cq child list */
9796 list_add_tail(&wq->list, &cq->child_list); 9813 list_add_tail(&wq->list, &cq->child_list);
9797out: 9814out:
9798 if (rc != MBX_TIMEOUT) 9815 mempool_free(mbox, phba->mbox_mem_pool);
9799 mempool_free(mbox, phba->mbox_mem_pool);
9800 return status; 9816 return status;
9801} 9817}
9802 9818
@@ -9970,8 +9986,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9970 list_add_tail(&drq->list, &cq->child_list); 9986 list_add_tail(&drq->list, &cq->child_list);
9971 9987
9972out: 9988out:
9973 if (rc != MBX_TIMEOUT) 9989 mempool_free(mbox, phba->mbox_mem_pool);
9974 mempool_free(mbox, phba->mbox_mem_pool);
9975 return status; 9990 return status;
9976} 9991}
9977 9992
@@ -10026,8 +10041,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
10026 10041
10027 /* Remove eq from any list */ 10042 /* Remove eq from any list */
10028 list_del_init(&eq->list); 10043 list_del_init(&eq->list);
10029 if (rc != MBX_TIMEOUT) 10044 mempool_free(mbox, eq->phba->mbox_mem_pool);
10030 mempool_free(mbox, eq->phba->mbox_mem_pool);
10031 return status; 10045 return status;
10032} 10046}
10033 10047
@@ -10080,8 +10094,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10080 } 10094 }
10081 /* Remove cq from any list */ 10095 /* Remove cq from any list */
10082 list_del_init(&cq->list); 10096 list_del_init(&cq->list);
10083 if (rc != MBX_TIMEOUT) 10097 mempool_free(mbox, cq->phba->mbox_mem_pool);
10084 mempool_free(mbox, cq->phba->mbox_mem_pool);
10085 return status; 10098 return status;
10086} 10099}
10087 10100
@@ -10134,8 +10147,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10134 } 10147 }
10135 /* Remove mq from any list */ 10148 /* Remove mq from any list */
10136 list_del_init(&mq->list); 10149 list_del_init(&mq->list);
10137 if (rc != MBX_TIMEOUT) 10150 mempool_free(mbox, mq->phba->mbox_mem_pool);
10138 mempool_free(mbox, mq->phba->mbox_mem_pool);
10139 return status; 10151 return status;
10140} 10152}
10141 10153
@@ -10187,8 +10199,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10187 } 10199 }
10188 /* Remove wq from any list */ 10200 /* Remove wq from any list */
10189 list_del_init(&wq->list); 10201 list_del_init(&wq->list);
10190 if (rc != MBX_TIMEOUT) 10202 mempool_free(mbox, wq->phba->mbox_mem_pool);
10191 mempool_free(mbox, wq->phba->mbox_mem_pool);
10192 return status; 10203 return status;
10193} 10204}
10194 10205
@@ -10258,8 +10269,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
10258 } 10269 }
10259 list_del_init(&hrq->list); 10270 list_del_init(&hrq->list);
10260 list_del_init(&drq->list); 10271 list_del_init(&drq->list);
10261 if (rc != MBX_TIMEOUT) 10272 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10262 mempool_free(mbox, hrq->phba->mbox_mem_pool);
10263 return status; 10273 return status;
10264} 10274}
10265 10275
@@ -10933,6 +10943,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10933 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 10943 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
10934 if (first_iocbq) { 10944 if (first_iocbq) {
10935 /* Initialize the first IOCB. */ 10945 /* Initialize the first IOCB. */
10946 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
10936 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 10947 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
10937 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 10948 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
10938 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 10949 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -10945,6 +10956,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10945 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 10956 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10946 LPFC_DATA_BUF_SIZE; 10957 LPFC_DATA_BUF_SIZE;
10947 first_iocbq->iocb.un.rcvels.remoteID = sid; 10958 first_iocbq->iocb.un.rcvels.remoteID = sid;
10959 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10960 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
10948 } 10961 }
10949 iocbq = first_iocbq; 10962 iocbq = first_iocbq;
10950 /* 10963 /*
@@ -10961,6 +10974,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10961 iocbq->iocb.ulpBdeCount++; 10974 iocbq->iocb.ulpBdeCount++;
10962 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = 10975 iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
10963 LPFC_DATA_BUF_SIZE; 10976 LPFC_DATA_BUF_SIZE;
10977 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10978 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
10964 } else { 10979 } else {
10965 iocbq = lpfc_sli_get_iocbq(vport->phba); 10980 iocbq = lpfc_sli_get_iocbq(vport->phba);
10966 if (!iocbq) { 10981 if (!iocbq) {
@@ -10978,6 +10993,8 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
10978 iocbq->iocb.ulpBdeCount = 1; 10993 iocbq->iocb.ulpBdeCount = 1;
10979 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 10994 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
10980 LPFC_DATA_BUF_SIZE; 10995 LPFC_DATA_BUF_SIZE;
10996 first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
10997 bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
10981 iocbq->iocb.un.rcvels.remoteID = sid; 10998 iocbq->iocb.un.rcvels.remoteID = sid;
10982 list_add_tail(&iocbq->list, &first_iocbq->list); 10999 list_add_tail(&iocbq->list, &first_iocbq->list);
10983 } 11000 }
@@ -11324,7 +11341,7 @@ lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11324 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11341 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11325 if (!mboxq) 11342 if (!mboxq)
11326 return -ENOMEM; 11343 return -ENOMEM;
11327 lpfc_init_vpi(mboxq, vpi); 11344 lpfc_init_vpi(phba, mboxq, vpi);
11328 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 11345 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11329 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 11346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11330 if (rc != MBX_TIMEOUT) 11347 if (rc != MBX_TIMEOUT)
@@ -11519,6 +11536,7 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11519 uint32_t alloc_len, req_len; 11536 uint32_t alloc_len, req_len;
11520 struct lpfc_mbx_read_fcf_tbl *read_fcf; 11537 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11521 11538
11539 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11522 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11540 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11523 if (!mboxq) { 11541 if (!mboxq) {
11524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11570,7 +11588,140 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11570 if (rc == MBX_NOT_FINISHED) { 11588 if (rc == MBX_NOT_FINISHED) {
11571 lpfc_sli4_mbox_cmd_free(phba, mboxq); 11589 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11572 error = -EIO; 11590 error = -EIO;
11573 } else 11591 } else {
11592 spin_lock_irq(&phba->hbalock);
11593 phba->hba_flag |= FCF_DISC_INPROGRESS;
11594 spin_unlock_irq(&phba->hbalock);
11574 error = 0; 11595 error = 0;
11596 }
11575 return error; 11597 return error;
11576} 11598}
11599
11600/**
11601 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
11602 * @phba: pointer to lpfc hba data structure.
11603 *
11604 * This function reads region 23 and parses the TLV for port status to
11605 * decide if the user disabled the port. If the TLV indicates the
11606 * port is disabled, the hba_flag is set accordingly.
11607 **/
11608void
11609lpfc_sli_read_link_ste(struct lpfc_hba *phba)
11610{
11611 LPFC_MBOXQ_t *pmb = NULL;
11612 MAILBOX_t *mb;
11613 uint8_t *rgn23_data = NULL;
11614 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
11615 int rc;
11616
11617 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11618 if (!pmb) {
11619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11620 "2600 lpfc_sli_read_serdes_param failed to"
11621 " allocate mailbox memory\n");
11622 goto out;
11623 }
11624 mb = &pmb->u.mb;
11625
11626 /* Get adapter Region 23 data */
11627 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
11628 if (!rgn23_data)
11629 goto out;
11630
11631 do {
11632 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
11633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11634
11635 if (rc != MBX_SUCCESS) {
11636 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11637 "2601 lpfc_sli_read_link_ste failed to"
11638 " read config region 23 rc 0x%x Status 0x%x\n",
11639 rc, mb->mbxStatus);
11640 mb->un.varDmp.word_cnt = 0;
11641 }
11642 /*
11643 * dump mem may return a zero when finished or we got a
11644 * mailbox error, either way we are done.
11645 */
11646 if (mb->un.varDmp.word_cnt == 0)
11647 break;
11648 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
11649 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
11650
11651 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
11652 rgn23_data + offset,
11653 mb->un.varDmp.word_cnt);
11654 offset += mb->un.varDmp.word_cnt;
11655 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
11656
11657 data_size = offset;
11658 offset = 0;
11659
11660 if (!data_size)
11661 goto out;
11662
11663 /* Check the region signature first */
11664 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
11665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11666 "2619 Config region 23 has bad signature\n");
11667 goto out;
11668 }
11669 offset += 4;
11670
11671 /* Check the data structure version */
11672 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
11673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11674 "2620 Config region 23 has bad version\n");
11675 goto out;
11676 }
11677 offset += 4;
11678
11679 /* Parse TLV entries in the region */
11680 while (offset < data_size) {
11681 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
11682 break;
11683 /*
11684 * If the TLV is not driver specific TLV or driver id is
11685 * not linux driver id, skip the record.
11686 */
11687 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
11688 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
11689 (rgn23_data[offset + 3] != 0)) {
11690 offset += rgn23_data[offset + 1] * 4 + 4;
11691 continue;
11692 }
11693
11694 /* Driver found a driver specific TLV in the config region */
11695 sub_tlv_len = rgn23_data[offset + 1] * 4;
11696 offset += 4;
11697 tlv_offset = 0;
11698
11699 /*
11700 * Search for configured port state sub-TLV.
11701 */
11702 while ((offset < data_size) &&
11703 (tlv_offset < sub_tlv_len)) {
11704 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
11705 offset += 4;
11706 tlv_offset += 4;
11707 break;
11708 }
11709 if (rgn23_data[offset] != PORT_STE_TYPE) {
11710 offset += rgn23_data[offset + 1] * 4 + 4;
11711 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
11712 continue;
11713 }
11714
11715 /* This HBA contains PORT_STE configured */
11716 if (!rgn23_data[offset + 2])
11717 phba->hba_flag |= LINK_DISABLED;
11718
11719 goto out;
11720 }
11721 }
11722out:
11723 if (pmb)
11724 mempool_free(pmb, phba->mbox_mem_pool);
11725 kfree(rgn23_data);
11726 return;
11727}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3b276b47d18f..b5f4ba1a5c27 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -132,6 +132,7 @@ struct lpfc_sli4_link {
132 132
133struct lpfc_fcf { 133struct lpfc_fcf {
134 uint8_t fabric_name[8]; 134 uint8_t fabric_name[8];
135 uint8_t switch_name[8];
135 uint8_t mac_addr[6]; 136 uint8_t mac_addr[6];
136 uint16_t fcf_indx; 137 uint16_t fcf_indx;
137 uint16_t fcfi; 138 uint16_t fcfi;
@@ -150,6 +151,10 @@ struct lpfc_fcf {
150#define LPFC_REGION23_SIGNATURE "RG23" 151#define LPFC_REGION23_SIGNATURE "RG23"
151#define LPFC_REGION23_VERSION 1 152#define LPFC_REGION23_VERSION 1
152#define LPFC_REGION23_LAST_REC 0xff 153#define LPFC_REGION23_LAST_REC 0xff
154#define DRIVER_SPECIFIC_TYPE 0xA2
155#define LINUX_DRIVER_ID 0x20
156#define PORT_STE_TYPE 0x1
157
153struct lpfc_fip_param_hdr { 158struct lpfc_fip_param_hdr {
154 uint8_t type; 159 uint8_t type;
155#define FCOE_PARAM_TYPE 0xA0 160#define FCOE_PARAM_TYPE 0xA0
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41094e02304b..9ae20af4bdb7 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.3" 21#define LPFC_DRIVER_VERSION "8.3.4"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e0b49922193e..606efa767548 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -313,22 +313,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
313 goto error_out; 313 goto error_out;
314 } 314 }
315 315
316 /*
317 * In SLI4, the vpi must be activated before it can be used
318 * by the port.
319 */
320 if (phba->sli_rev == LPFC_SLI_REV4) {
321 rc = lpfc_sli4_init_vpi(phba, vpi);
322 if (rc) {
323 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
324 "1838 Failed to INIT_VPI on vpi %d "
325 "status %d\n", vpi, rc);
326 rc = VPORT_NORESOURCES;
327 lpfc_free_vpi(phba, vpi);
328 goto error_out;
329 }
330 }
331
332 /* Assign an unused board number */ 316 /* Assign an unused board number */
333 if ((instance = lpfc_get_instance()) < 0) { 317 if ((instance = lpfc_get_instance()) < 0) {
334 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 318 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -367,12 +351,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
367 goto error_out; 351 goto error_out;
368 } 352 }
369 353
370 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8); 354 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
371 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8); 355 u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
372 if (fc_vport->node_name != 0)
373 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
374 if (fc_vport->port_name != 0)
375 u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
376 356
377 memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8); 357 memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
378 memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8); 358 memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
@@ -404,7 +384,34 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
404 *(struct lpfc_vport **)fc_vport->dd_data = vport; 384 *(struct lpfc_vport **)fc_vport->dd_data = vport;
405 vport->fc_vport = fc_vport; 385 vport->fc_vport = fc_vport;
406 386
387 /*
388 * In SLI4, the vpi must be activated before it can be used
389 * by the port.
390 */
391 if ((phba->sli_rev == LPFC_SLI_REV4) &&
392 (pport->vfi_state & LPFC_VFI_REGISTERED)) {
393 rc = lpfc_sli4_init_vpi(phba, vpi);
394 if (rc) {
395 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
396 "1838 Failed to INIT_VPI on vpi %d "
397 "status %d\n", vpi, rc);
398 rc = VPORT_NORESOURCES;
399 lpfc_free_vpi(phba, vpi);
400 goto error_out;
401 }
402 } else if (phba->sli_rev == LPFC_SLI_REV4) {
403 /*
404 * Driver cannot INIT_VPI now. Set the flags to
405 * init_vpi when reg_vfi complete.
406 */
407 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
408 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
409 rc = VPORT_OK;
410 goto out;
411 }
412
407 if ((phba->link_state < LPFC_LINK_UP) || 413 if ((phba->link_state < LPFC_LINK_UP) ||
414 (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
408 (phba->fc_topology == TOPOLOGY_LOOP)) { 415 (phba->fc_topology == TOPOLOGY_LOOP)) {
409 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); 416 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
410 rc = VPORT_OK; 417 rc = VPORT_OK;
@@ -661,7 +668,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
661 lpfc_printf_log(vport->phba, KERN_WARNING, 668 lpfc_printf_log(vport->phba, KERN_WARNING,
662 LOG_VPORT, 669 LOG_VPORT,
663 "1829 CT command failed to " 670 "1829 CT command failed to "
664 "delete objects on fabric. \n"); 671 "delete objects on fabric\n");
665 } 672 }
666 /* First look for the Fabric ndlp */ 673 /* First look for the Fabric ndlp */
667 ndlp = lpfc_findnode_did(vport, Fabric_DID); 674 ndlp = lpfc_findnode_did(vport, Fabric_DID);