author     James Smart <James.Smart@Emulex.Com>             2007-06-17 20:56:38 -0400
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2007-06-17 23:27:39 -0400
commit     92d7f7b0cde3ad2260e7462b40867b57efd49851 (patch)
tree       fadb1d8f1a817c2f85937b5e9c3b830bdecb5555
parent     ed957684294618602b48f1950b0c9bbcb036583f (diff)
[SCSI] lpfc: NPIV: add NPIV support on top of SLI-3
NPIV support is added to the driver. It utilizes the interfaces of the fc transport for the creation and deletion of vports. Within the driver, a new Scsi_Host is created for each NPIV instance, and is paired with a new instance of an FC port. This allows N FC Port elements to share a single Adapter.

Signed-off-by: James Smart <James.Smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
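For orientation, here is a minimal sketch (not the patch's lpfc_vport.c implementation) of how the pieces named above fit together: the fc transport's new NPIV hooks registered at the bottom of lpfc_attr.c (.vport_create / .vport_delete, with dd_fcvport_size reserving room for a driver pointer), plus the lpfc_create_port(), lpfc_get_instance() and lpfc_initial_fdisc() prototypes added to lpfc_crtn.h. The callback body, its name and its return codes are assumptions for illustration; error handling and locking are trimmed.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc.h"
#include "lpfc_crtn.h"

/*
 * Illustrative only: pair a new Scsi_Host/lpfc_vport with each NPIV
 * instance created through the fc transport.  The parent Scsi_Host
 * (the physical port) is recovered from the fc_vport, and the driver
 * pointer is stashed in the dd_data area sized by dd_fcvport_size.
 */
static int example_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct Scsi_Host *shost = vport_to_shost(fc_vport);
	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = pport->phba;
	struct lpfc_vport *vport;

	/* Creates the new Scsi_Host paired with a new FC port instance */
	vport = lpfc_create_port(phba, lpfc_get_instance(), fc_vport);
	if (!vport)
		return -ENOMEM;

	/* dd_fcvport_size == sizeof(struct lpfc_vport *), so stash it */
	*(struct lpfc_vport **) fc_vport->dd_data = vport;
	vport->fc_vport = fc_vport;

	/* Virtual ports join the fabric with FDISC rather than FLOGI */
	if (!disable && lpfc_initial_fdisc(vport))
		return -EIO;

	return 0;
}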
-rw-r--r--  drivers/scsi/lpfc/Makefile            5
-rw-r--r--  drivers/scsi/lpfc/lpfc.h            156
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c       308
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h        71
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c         520
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h         8
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       1661
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   1140
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h         125
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c       285
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h       1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c       131
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c         46
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c  298
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c       355
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       1034
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h         17
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h      2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c      508
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h      113
20 files changed, 5021 insertions, 1763 deletions
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index d1be465d5f55..d94c9e0212a7 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
1#/******************************************************************* 1#/*******************************************************************
2# * This file is part of the Emulex Linux Device Driver for * 2# * This file is part of the Emulex Linux Device Driver for *
3# * Fibre Channel Host Bus Adapters. * 3# * Fibre Channel Host Bus Adapters. *
4# * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5# * EMULEX and SLI are trademarks of Emulex. * 5# * EMULEX and SLI are trademarks of Emulex. *
6# * www.emulex.com * 6# * www.emulex.com *
7# * * 7# * *
@@ -27,4 +27,5 @@ endif
27obj-$(CONFIG_SCSI_LPFC) := lpfc.o 27obj-$(CONFIG_SCSI_LPFC) := lpfc.o
28 28
29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ 29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o 30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
31 lpfc_vport.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 74f4d18842cc..4b9019d7d508 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -34,6 +34,17 @@ struct lpfc_sli2_slim;
34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 34#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 35#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
36 36
37/*
38 * Following time intervals are used of adjusting SCSI device
39 * queue depths when there are driver resource error or Firmware
40 * resource error.
41 */
42#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */
43#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */
44
45/* Number of exchanges reserved for discovery to complete */
46#define LPFC_DISC_IOCB_BUFF_COUNT 20
47
37/* Define macros for 64 bit support */ 48/* Define macros for 64 bit support */
38#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr))) 49#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
39#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32))) 50#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -97,6 +108,29 @@ typedef struct lpfc_vpd {
97 uint32_t sli2FwRev; 108 uint32_t sli2FwRev;
98 uint8_t sli2FwName[16]; 109 uint8_t sli2FwName[16];
99 } rev; 110 } rev;
111 struct {
112#ifdef __BIG_ENDIAN_BITFIELD
113 uint32_t rsvd2 :24; /* Reserved */
114 uint32_t cmv : 1; /* Configure Max VPIs */
115 uint32_t ccrp : 1; /* Config Command Ring Polling */
116 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
117 uint32_t chbs : 1; /* Cofigure Host Backing store */
118 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
119 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
120 uint32_t cmx : 1; /* Configure Max XRIs */
121 uint32_t cmr : 1; /* Configure Max RPIs */
122#else /* __LITTLE_ENDIAN */
123 uint32_t cmr : 1; /* Configure Max RPIs */
124 uint32_t cmx : 1; /* Configure Max XRIs */
125 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
126 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
127 uint32_t chbs : 1; /* Cofigure Host Backing store */
128 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
129 uint32_t ccrp : 1; /* Config Command Ring Polling */
130 uint32_t cmv : 1; /* Configure Max VPIs */
131 uint32_t rsvd2 :24; /* Reserved */
132#endif
133 } sli3Feat;
100} lpfc_vpd_t; 134} lpfc_vpd_t;
101 135
102struct lpfc_scsi_buf; 136struct lpfc_scsi_buf;
@@ -129,6 +163,7 @@ struct lpfc_stats {
129 uint32_t elsRcvRPS; 163 uint32_t elsRcvRPS;
130 uint32_t elsRcvRPL; 164 uint32_t elsRcvRPL;
131 uint32_t elsXmitFLOGI; 165 uint32_t elsXmitFLOGI;
166 uint32_t elsXmitFDISC;
132 uint32_t elsXmitPLOGI; 167 uint32_t elsXmitPLOGI;
133 uint32_t elsXmitPRLI; 168 uint32_t elsXmitPRLI;
134 uint32_t elsXmitADISC; 169 uint32_t elsXmitADISC;
@@ -174,18 +209,21 @@ struct lpfc_sysfs_mbox {
174 209
175struct lpfc_hba; 210struct lpfc_hba;
176 211
212
177enum discovery_state { 213enum discovery_state {
178 LPFC_STATE_UNKNOWN = 0, /* HBA state is unknown */ 214 LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
179 LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */ 215 LPFC_VPORT_FAILED = 1, /* vport has failed */
180 LPFC_FLOGI = 7, /* FLOGI sent to Fabric */ 216 LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
181 LPFC_FABRIC_CFG_LINK = 8, /* Fabric assigned NPORT Id 217 LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
182 * configured */ 218 LPFC_FDISC = 8, /* FDISC sent for vport */
183 LPFC_NS_REG = 9, /* Register with NameServer */ 219 LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id
184 LPFC_NS_QRY = 10, /* Query NameServer for NPort ID list */ 220 * configured */
185 LPFC_BUILD_DISC_LIST = 11, /* Build ADISC and PLOGI lists for 221 LPFC_NS_REG = 10, /* Register with NameServer */
186 * device authentication / discovery */ 222 LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */
187 LPFC_DISC_AUTH = 12, /* Processing ADISC list */ 223 LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for
188 LPFC_VPORT_READY = 32, 224 * device authentication / discovery */
225 LPFC_DISC_AUTH = 13, /* Processing ADISC list */
226 LPFC_VPORT_READY = 32,
189}; 227};
190 228
191enum hba_state { 229enum hba_state {
@@ -195,8 +233,9 @@ enum hba_state {
195 LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */ 233 LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */
196 LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */ 234 LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */
197 LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */ 235 LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */
198 LPFC_CLEAR_LA = 13, /* authentication cmplt - issue 236 LPFC_CLEAR_LA = 6, /* authentication cmplt - issue
199 * CLEAR_LA */ 237 * CLEAR_LA */
238 LPFC_HBA_READY = 32,
200 LPFC_HBA_ERROR = -1 239 LPFC_HBA_ERROR = -1
201}; 240};
202 241
@@ -209,26 +248,30 @@ struct lpfc_vport {
209#define LPFC_FABRIC_PORT 3 248#define LPFC_FABRIC_PORT 3
210 enum discovery_state port_state; 249 enum discovery_state port_state;
211 250
251 uint16_t vpi;
212 252
213 uint32_t fc_flag; /* FC flags */ 253 uint32_t fc_flag; /* FC flags */
214/* Several of these flags are HBA centric and should be moved to 254/* Several of these flags are HBA centric and should be moved to
215 * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP) 255 * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
216 */ 256 */
217#define FC_PT2PT 0x1 /* pt2pt with no fabric */ 257#define FC_PT2PT 0x1 /* pt2pt with no fabric */
218#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */ 258#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
219#define FC_DISC_TMO 0x4 /* Discovery timer running */ 259#define FC_DISC_TMO 0x4 /* Discovery timer running */
220#define FC_PUBLIC_LOOP 0x8 /* Public loop */ 260#define FC_PUBLIC_LOOP 0x8 /* Public loop */
221#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */ 261#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
222#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */ 262#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
223#define FC_NLP_MORE 0x40 /* More node to process in node tbl */ 263#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
224#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ 264#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
225#define FC_FABRIC 0x100 /* We are fabric attached */ 265#define FC_FABRIC 0x100 /* We are fabric attached */
226#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */ 266#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
227#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/ 267#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
228#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ 268#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
229#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ 269#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
230#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ 270#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
231#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */ 271#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
272#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
273#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
274#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
232 275
233 struct list_head fc_nodes; 276 struct list_head fc_nodes;
234 277
@@ -269,6 +312,9 @@ struct lpfc_vport {
269#define WORKER_ELS_TMO 0x2 /* ELS timeout */ 312#define WORKER_ELS_TMO 0x2 /* ELS timeout */
270#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */ 313#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
271#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */ 314#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
315#define WORKER_FABRIC_BLOCK_TMO 0x10 /* fabric block timout */
316#define WORKER_RAMP_DOWN_QUEUE 0x20 /* Decrease Q depth */
317#define WORKER_RAMP_UP_QUEUE 0x40 /* Increase Q depth */
272 318
273 struct timer_list fc_fdmitmo; 319 struct timer_list fc_fdmitmo;
274 struct timer_list els_tmofunc; 320 struct timer_list els_tmofunc;
@@ -278,10 +324,10 @@ struct lpfc_vport {
278 uint8_t load_flag; 324 uint8_t load_flag;
279#define FC_LOADING 0x1 /* HBA in process of loading drvr */ 325#define FC_LOADING 0x1 /* HBA in process of loading drvr */
280#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ 326#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
281 327 char *vname; /* Application assigned name */
328 struct fc_vport *fc_vport;
282}; 329};
283 330
284
285struct hbq_s { 331struct hbq_s {
286 uint16_t entry_count; /* Current number of HBQ slots */ 332 uint16_t entry_count; /* Current number of HBQ slots */
287 uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */ 333 uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
@@ -289,33 +335,38 @@ struct hbq_s {
289 uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */ 335 uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
290}; 336};
291 337
292#define MAX_HBQS 16 338#define LPFC_MAX_HBQS 16
339/* this matches the possition in the lpfc_hbq_defs array */
340#define LPFC_ELS_HBQ 0
293 341
294struct lpfc_hba { 342struct lpfc_hba {
295 struct lpfc_sli sli; 343 struct lpfc_sli sli;
296 uint32_t sli_rev; /* SLI2 or SLI3 */ 344 uint32_t sli_rev; /* SLI2 or SLI3 */
297 uint32_t sli3_options; /* Mask of enabled SLI3 options */ 345 uint32_t sli3_options; /* Mask of enabled SLI3 options */
298#define LPFC_SLI3_ENABLED 0x01 346#define LPFC_SLI3_ENABLED 0x01
299#define LPFC_SLI3_HBQ_ENABLED 0x02 347#define LPFC_SLI3_HBQ_ENABLED 0x02
300#define LPFC_SLI3_INB_ENABLED 0x04 348#define LPFC_SLI3_NPIV_ENABLED 0x04
349#define LPFC_SLI3_VPORT_TEARDOWN 0x08
301 uint32_t iocb_cmd_size; 350 uint32_t iocb_cmd_size;
302 uint32_t iocb_rsp_size; 351 uint32_t iocb_rsp_size;
303 352
304 enum hba_state link_state; 353 enum hba_state link_state;
305 uint32_t link_flag; /* link state flags */ 354 uint32_t link_flag; /* link state flags */
306#define LS_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */ 355#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
307 /* This flag is set while issuing */ 356 /* This flag is set while issuing */
308 /* INIT_LINK mailbox command */ 357 /* INIT_LINK mailbox command */
309#define LS_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */ 358#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
359#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */
310 360
311 struct lpfc_sli2_slim *slim2p; 361 struct lpfc_sli2_slim *slim2p;
312 struct lpfc_dmabuf hbqslimp; 362 struct lpfc_dmabuf hbqslimp;
313 363
314 dma_addr_t slim2p_mapping; 364 dma_addr_t slim2p_mapping;
315 365
316
317 uint16_t pci_cfg_value; 366 uint16_t pci_cfg_value;
318 367
368 uint8_t work_found;
369#define LPFC_MAX_WORKER_ITERATION 4
319 370
320 uint8_t fc_linkspeed; /* Link speed after last READ_LA */ 371 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
321 372
@@ -325,7 +376,7 @@ struct lpfc_hba {
325 struct timer_list fc_estabtmo; /* link establishment timer */ 376 struct timer_list fc_estabtmo; /* link establishment timer */
326 /* These fields used to be binfo */ 377 /* These fields used to be binfo */
327 uint32_t fc_pref_DID; /* preferred D_ID */ 378 uint32_t fc_pref_DID; /* preferred D_ID */
328 uint8_t fc_pref_ALPA; /* preferred AL_PA */ 379 uint8_t fc_pref_ALPA; /* preferred AL_PA */
329 uint32_t fc_edtov; /* E_D_TOV timer value */ 380 uint32_t fc_edtov; /* E_D_TOV timer value */
330 uint32_t fc_arbtov; /* ARB_TOV timer value */ 381 uint32_t fc_arbtov; /* ARB_TOV timer value */
331 uint32_t fc_ratov; /* R_A_TOV timer value */ 382 uint32_t fc_ratov; /* R_A_TOV timer value */
@@ -355,6 +406,8 @@ struct lpfc_hba {
355 uint32_t cfg_nodev_tmo; 406 uint32_t cfg_nodev_tmo;
356 uint32_t cfg_devloss_tmo; 407 uint32_t cfg_devloss_tmo;
357 uint32_t cfg_hba_queue_depth; 408 uint32_t cfg_hba_queue_depth;
409 uint32_t cfg_peer_port_login;
410 uint32_t cfg_vport_restrict_login;
358 uint32_t cfg_fcp_class; 411 uint32_t cfg_fcp_class;
359 uint32_t cfg_use_adisc; 412 uint32_t cfg_use_adisc;
360 uint32_t cfg_ack0; 413 uint32_t cfg_ack0;
@@ -391,11 +444,9 @@ struct lpfc_hba {
391 wait_queue_head_t *work_wait; 444 wait_queue_head_t *work_wait;
392 struct task_struct *worker_thread; 445 struct task_struct *worker_thread;
393 446
394 struct hbq_dmabuf *hbq_buffer_pool; 447 struct list_head hbq_buffer_list;
395 uint32_t hbq_buffer_count;
396 uint32_t hbq_buff_count; /* Current hbq buffers */
397 uint32_t hbq_count; /* Count of configured HBQs */ 448 uint32_t hbq_count; /* Count of configured HBQs */
398 struct hbq_s hbqs[MAX_HBQS]; /* local copy of hbq indicies */ 449 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
399 450
400 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 451 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
401 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 452 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
@@ -413,7 +464,7 @@ struct lpfc_hba {
413 464
414 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */ 465 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
415 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */ 466 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
416 uint32_t __iomem *hbq_get; /* Address in SLIM to HBQ get ptrs */ 467 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
417 468
418 int brd_no; /* FC board number */ 469 int brd_no; /* FC board number */
419 470
@@ -464,6 +515,22 @@ struct lpfc_hba {
464 struct fc_host_statistics link_stats; 515 struct fc_host_statistics link_stats;
465 struct list_head port_list; 516 struct list_head port_list;
466 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 517 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
518 uint16_t max_vpi; /* Maximum virtual nports */
519 uint16_t vpi_cnt; /* Nport count */
520#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */
521 unsigned long *vpi_bmask; /* vpi allocation table */
522
523 /* Data structure used by fabric iocb scheduler */
524 struct list_head fabric_iocb_list;
525 atomic_t fabric_iocb_count;
526 struct timer_list fabric_block_timer;
527 unsigned long bit_flags;
528#define FABRIC_COMANDS_BLOCKED 0
529 atomic_t num_rsrc_err;
530 atomic_t num_cmd_success;
531 unsigned long last_rsrc_error_time;
532 unsigned long last_ramp_down_time;
533 unsigned long last_ramp_up_time;
467}; 534};
468 535
469static inline struct Scsi_Host * 536static inline struct Scsi_Host *
@@ -485,10 +552,9 @@ static inline int
485lpfc_is_link_up(struct lpfc_hba *phba) 552lpfc_is_link_up(struct lpfc_hba *phba)
486{ 553{
487 return phba->link_state == LPFC_LINK_UP || 554 return phba->link_state == LPFC_LINK_UP ||
488 phba->link_state == LPFC_CLEAR_LA; 555 phba->link_state == LPFC_CLEAR_LA ||
556 phba->link_state == LPFC_HBA_READY;
489} 557}
490 558
491
492
493#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ 559#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
494 560
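Before moving on to lpfc_attr.c: the struct lpfc_hba additions above (max_vpi, vpi_cnt and the vpi_bmask "vpi allocation table") amount to a small VPI allocator. A minimal sketch of the usual bitmap pattern follows, assuming standard kernel bitops and that VPI 0 stays with the physical port; the helper names are hypothetical and locking is omitted, so this is not the patch's own allocator.

#include <linux/bitops.h>

/* Hypothetical helpers; the patch's real allocator lives elsewhere in
 * the driver.  This only illustrates how vpi_bmask, max_vpi and
 * vpi_cnt are meant to be used together. */
static int example_alloc_vpi(struct lpfc_hba *phba)
{
	int vpi;

	/* Assume VPI 0 belongs to the physical port; search from 1 */
	vpi = find_next_zero_bit(phba->vpi_bmask, phba->max_vpi + 1, 1);
	if (vpi > phba->max_vpi)
		return 0;		/* no free VPI available */
	set_bit(vpi, phba->vpi_bmask);
	phba->vpi_cnt++;
	return vpi;
}

static void example_free_vpi(struct lpfc_hba *phba, int vpi)
{
	clear_bit(vpi, phba->vpi_bmask);
	phba->vpi_cnt--;
}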
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b8adff8cea6a..5cb7924fe3d7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,7 @@
39#include "lpfc_version.h" 39#include "lpfc_version.h"
40#include "lpfc_compat.h" 40#include "lpfc_compat.h"
41#include "lpfc_crtn.h" 41#include "lpfc_crtn.h"
42#include "lpfc_vport.h"
42 43
43#define LPFC_DEF_DEVLOSS_TMO 30 44#define LPFC_DEF_DEVLOSS_TMO 30
44#define LPFC_MIN_DEVLOSS_TMO 1 45#define LPFC_MIN_DEVLOSS_TMO 1
@@ -139,7 +140,7 @@ lpfc_fwrev_show(struct class_device *cdev, char *buf)
139 char fwrev[32]; 140 char fwrev[32];
140 141
141 lpfc_decode_firmware_rev(phba, fwrev, 1); 142 lpfc_decode_firmware_rev(phba, fwrev, 1);
142 return snprintf(buf, PAGE_SIZE, "%s\n",fwrev); 143 return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
143} 144}
144 145
145static ssize_t 146static ssize_t
@@ -178,10 +179,11 @@ lpfc_state_show(struct class_device *cdev, char *buf)
178 case LPFC_INIT_MBX_CMDS: 179 case LPFC_INIT_MBX_CMDS:
179 case LPFC_LINK_DOWN: 180 case LPFC_LINK_DOWN:
180 case LPFC_HBA_ERROR: 181 case LPFC_HBA_ERROR:
181 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down"); 182 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
182 break; 183 break;
183 case LPFC_LINK_UP: 184 case LPFC_LINK_UP:
184 case LPFC_CLEAR_LA: 185 case LPFC_CLEAR_LA:
186 case LPFC_HBA_READY:
185 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n"); 187 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n");
186 188
187 switch (vport->port_state) { 189 switch (vport->port_state) {
@@ -190,8 +192,9 @@ lpfc_state_show(struct class_device *cdev, char *buf)
190 break; 192 break;
191 case LPFC_LOCAL_CFG_LINK: 193 case LPFC_LOCAL_CFG_LINK:
192 len += snprintf(buf + len, PAGE_SIZE-len, 194 len += snprintf(buf + len, PAGE_SIZE-len,
193 "configuring\n"); 195 "Configuring Link\n");
194 break; 196 break;
197 case LPFC_FDISC:
195 case LPFC_FLOGI: 198 case LPFC_FLOGI:
196 case LPFC_FABRIC_CFG_LINK: 199 case LPFC_FABRIC_CFG_LINK:
197 case LPFC_NS_REG: 200 case LPFC_NS_REG:
@@ -205,7 +208,11 @@ lpfc_state_show(struct class_device *cdev, char *buf)
205 len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n"); 208 len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
206 break; 209 break;
207 210
208 case LPFC_STATE_UNKNOWN: 211 case LPFC_VPORT_FAILED:
212 len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
213 break;
214
215 case LPFC_VPORT_UNKNOWN:
209 len += snprintf(buf + len, PAGE_SIZE - len, 216 len += snprintf(buf + len, PAGE_SIZE - len,
210 "Unknown\n"); 217 "Unknown\n");
211 break; 218 break;
@@ -433,6 +440,151 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
433} 440}
434 441
435static ssize_t 442static ssize_t
443lpfc_max_vpi_show(struct class_device *cdev, char *buf)
444{
445 struct Scsi_Host *shost = class_to_shost(cdev);
446 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
447 struct lpfc_hba *phba = vport->phba;
448
449 return snprintf(buf, PAGE_SIZE, "%d\n", phba->max_vpi);
450}
451
452static ssize_t
453lpfc_used_vpi_show(struct class_device *cdev, char *buf)
454{
455 struct Scsi_Host *shost = class_to_shost(cdev);
456 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
457 struct lpfc_hba *phba = vport->phba;
458
459 /* Don't count the physical port */
460 return snprintf(buf, PAGE_SIZE, "%d\n", phba->vpi_cnt-1);
461}
462
463int
464lpfc_get_hba_info(struct lpfc_hba *phba, uint32_t *mxri,
465 uint32_t *axri, uint32_t *mrpi, uint32_t *arpi)
466{
467 struct lpfc_sli *psli = &phba->sli;
468 LPFC_MBOXQ_t *pmboxq;
469 MAILBOX_t *pmb;
470 int rc = 0;
471
472 /*
473 * prevent udev from issuing mailbox commands until the port is
474 * configured.
475 */
476 if (phba->link_state < LPFC_LINK_DOWN ||
477 !phba->mbox_mem_pool ||
478 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
479 return 0;
480
481 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
482 return 0;
483
484 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
485 if (!pmboxq)
486 return 0;
487 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
488
489 pmb = &pmboxq->mb;
490 pmb->mbxCommand = MBX_READ_CONFIG;
491 pmb->mbxOwner = OWN_HOST;
492 pmboxq->context1 = NULL;
493
494 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
495 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
496 rc = MBX_NOT_FINISHED;
497 else
498 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
499
500 if (rc != MBX_SUCCESS) {
501 if (rc == MBX_TIMEOUT)
502 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
503 else
504 mempool_free(pmboxq, phba->mbox_mem_pool);
505 return 0;
506 }
507
508 if (mrpi)
509 *mrpi = pmb->un.varRdConfig.max_rpi;
510 if (arpi)
511 *arpi = pmb->un.varRdConfig.avail_rpi;
512 if (mxri)
513 *mxri = pmb->un.varRdConfig.max_xri;
514 if (axri)
515 *axri = pmb->un.varRdConfig.avail_xri;
516
517 mempool_free(pmboxq, phba->mbox_mem_pool);
518 return 1;
519}
520
521static ssize_t
522lpfc_max_rpi_show(struct class_device *cdev, char *buf)
523{
524 struct Scsi_Host *shost = class_to_shost(cdev);
525 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
526 struct lpfc_hba *phba = vport->phba;
527 uint32_t cnt;
528
529 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL))
530 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
531 return snprintf(buf, PAGE_SIZE, "Unknown\n");
532}
533
534static ssize_t
535lpfc_used_rpi_show(struct class_device *cdev, char *buf)
536{
537 struct Scsi_Host *shost = class_to_shost(cdev);
538 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
539 struct lpfc_hba *phba = vport->phba;
540 uint32_t cnt, acnt;
541
542 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt))
543 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
544 return snprintf(buf, PAGE_SIZE, "Unknown\n");
545}
546
547static ssize_t
548lpfc_max_xri_show(struct class_device *cdev, char *buf)
549{
550 struct Scsi_Host *shost = class_to_shost(cdev);
551 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
552 struct lpfc_hba *phba = vport->phba;
553 uint32_t cnt;
554
555 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL))
556 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
557 return snprintf(buf, PAGE_SIZE, "Unknown\n");
558}
559
560static ssize_t
561lpfc_used_xri_show(struct class_device *cdev, char *buf)
562{
563 struct Scsi_Host *shost = class_to_shost(cdev);
564 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
565 struct lpfc_hba *phba = vport->phba;
566 uint32_t cnt, acnt;
567
568 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL))
569 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
570 return snprintf(buf, PAGE_SIZE, "Unknown\n");
571}
572
573static ssize_t
574lpfc_npiv_info_show(struct class_device *cdev, char *buf)
575{
576 struct Scsi_Host *shost = class_to_shost(cdev);
577 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
578 struct lpfc_hba *phba = vport->phba;
579
580 if (!(phba->max_vpi))
581 return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
582 if (vport->port_type == LPFC_PHYSICAL_PORT)
583 return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
584 return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
585}
586
587static ssize_t
436lpfc_poll_show(struct class_device *cdev, char *buf) 588lpfc_poll_show(struct class_device *cdev, char *buf)
437{ 589{
438 struct Scsi_Host *shost = class_to_shost(cdev); 590 struct Scsi_Host *shost = class_to_shost(cdev);
@@ -640,6 +792,13 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
640static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 792static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
641 lpfc_board_mode_show, lpfc_board_mode_store); 793 lpfc_board_mode_show, lpfc_board_mode_store);
642static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 794static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
795static CLASS_DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
796static CLASS_DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
797static CLASS_DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
798static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
799static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
800static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
801static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
643 802
644 803
645static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 804static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -829,6 +988,17 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
829static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, 988static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
830 lpfc_poll_show, lpfc_poll_store); 989 lpfc_poll_show, lpfc_poll_store);
831 990
991int lpfc_sli_mode = 0;
992module_param(lpfc_sli_mode, int, 0);
993MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
994 " 0 - auto (SLI-3 if supported),"
995 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
996 " 3 - select SLI-3");
997
998int lpfc_npiv_enable = 0;
999module_param(lpfc_npiv_enable, int, 0);
1000MODULE_PARM_DESC(lpfc_npiv_enable, "Enable NPIV functionality");
1001
832/* 1002/*
833# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 1003# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
834# until the timer expires. Value range is [0,255]. Default value is 30. 1004# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -985,6 +1155,33 @@ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
985 "Max number of FCP commands we can queue to a lpfc HBA"); 1155 "Max number of FCP commands we can queue to a lpfc HBA");
986 1156
987/* 1157/*
1158# peer_port_login: This parameter allows/prevents logins
1159# between peer ports hosted on the same physical port.
1160# When this parameter is set 0 peer ports of same physical port
1161# are not allowed to login to each other.
1162# When this parameter is set 1 peer ports of same physical port
1163# are allowed to login to each other.
1164# Default value of this parameter is 0.
1165*/
1166LPFC_ATTR_R(peer_port_login, 0, 0, 1,
1167 "Allow peer ports on the same physical port to login to each "
1168 "other.");
1169
1170/*
1171# vport_restrict_login: This parameter allows/prevents logins
1172# between Virtual Ports and remote initiators.
1173# When this parameter is not set (0) Virtual Ports will accept PLOGIs from
1174# other initiators and will attempt to PLOGI all remote ports.
1175# When this parameter is set (1) Virtual Ports will reject PLOGIs from
1176# remote ports and will not attempt to PLOGI to other initiators.
1177# This parameter does not restrict to the physical port.
1178# This parameter does not restrict logins to Fabric resident remote ports.
1179# Default value of this parameter is 1.
1180*/
1181LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
1182 "Restrict virtual ports login to remote initiators.");
1183
1184/*
988# Some disk devices have a "select ID" or "select Target" capability. 1185# Some disk devices have a "select ID" or "select Target" capability.
989# From a protocol standpoint "select ID" usually means select the 1186# From a protocol standpoint "select ID" usually means select the
990# Fibre channel "ALPA". In the FC-AL Profile there is an "informative 1187# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
@@ -1127,6 +1324,7 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
1127LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); 1324LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
1128 1325
1129 1326
1327
1130struct class_device_attribute *lpfc_hba_attrs[] = { 1328struct class_device_attribute *lpfc_hba_attrs[] = {
1131 &class_device_attr_info, 1329 &class_device_attr_info,
1132 &class_device_attr_serialnum, 1330 &class_device_attr_serialnum,
@@ -1143,6 +1341,8 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
1143 &class_device_attr_lpfc_log_verbose, 1341 &class_device_attr_lpfc_log_verbose,
1144 &class_device_attr_lpfc_lun_queue_depth, 1342 &class_device_attr_lpfc_lun_queue_depth,
1145 &class_device_attr_lpfc_hba_queue_depth, 1343 &class_device_attr_lpfc_hba_queue_depth,
1344 &class_device_attr_lpfc_peer_port_login,
1345 &class_device_attr_lpfc_vport_restrict_login,
1146 &class_device_attr_lpfc_nodev_tmo, 1346 &class_device_attr_lpfc_nodev_tmo,
1147 &class_device_attr_lpfc_devloss_tmo, 1347 &class_device_attr_lpfc_devloss_tmo,
1148 &class_device_attr_lpfc_fcp_class, 1348 &class_device_attr_lpfc_fcp_class,
@@ -1161,6 +1361,13 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
1161 &class_device_attr_nport_evt_cnt, 1361 &class_device_attr_nport_evt_cnt,
1162 &class_device_attr_management_version, 1362 &class_device_attr_management_version,
1163 &class_device_attr_board_mode, 1363 &class_device_attr_board_mode,
1364 &class_device_attr_max_vpi,
1365 &class_device_attr_used_vpi,
1366 &class_device_attr_max_rpi,
1367 &class_device_attr_used_rpi,
1368 &class_device_attr_max_xri,
1369 &class_device_attr_used_xri,
1370 &class_device_attr_npiv_info,
1164 &class_device_attr_issue_reset, 1371 &class_device_attr_issue_reset,
1165 &class_device_attr_lpfc_poll, 1372 &class_device_attr_lpfc_poll,
1166 &class_device_attr_lpfc_poll_tmo, 1373 &class_device_attr_lpfc_poll_tmo,
@@ -1299,7 +1506,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
1299 } else { 1506 } else {
1300 if (phba->sysfs_mbox.state != SMBOX_WRITING || 1507 if (phba->sysfs_mbox.state != SMBOX_WRITING ||
1301 phba->sysfs_mbox.offset != off || 1508 phba->sysfs_mbox.offset != off ||
1302 phba->sysfs_mbox.mbox == NULL ) { 1509 phba->sysfs_mbox.mbox == NULL) {
1303 sysfs_mbox_idle(phba); 1510 sysfs_mbox_idle(phba);
1304 spin_unlock_irq(&phba->hbalock); 1511 spin_unlock_irq(&phba->hbalock);
1305 return -EAGAIN; 1512 return -EAGAIN;
@@ -1406,6 +1613,8 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1406 return -EPERM; 1613 return -EPERM;
1407 } 1614 }
1408 1615
1616 phba->sysfs_mbox.mbox->vport = vport;
1617
1409 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 1618 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
1410 sysfs_mbox_idle(phba); 1619 sysfs_mbox_idle(phba);
1411 spin_unlock_irq(&phba->hbalock); 1620 spin_unlock_irq(&phba->hbalock);
@@ -1480,12 +1689,12 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
1480 int error; 1689 int error;
1481 1690
1482 error = sysfs_create_bin_file(&shost->shost_classdev.kobj, 1691 error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
1483 &sysfs_ctlreg_attr); 1692 &sysfs_ctlreg_attr);
1484 if (error) 1693 if (error)
1485 goto out; 1694 goto out;
1486 1695
1487 error = sysfs_create_bin_file(&shost->shost_classdev.kobj, 1696 error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
1488 &sysfs_mbox_attr); 1697 &sysfs_mbox_attr);
1489 if (error) 1698 if (error)
1490 goto out_remove_ctlreg_attr; 1699 goto out_remove_ctlreg_attr;
1491 1700
@@ -1527,7 +1736,9 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
1527 1736
1528 spin_lock_irq(shost->host_lock); 1737 spin_lock_irq(shost->host_lock);
1529 1738
1530 if (lpfc_is_link_up(phba)) { 1739 if (vport->port_type == LPFC_NPIV_PORT) {
1740 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1741 } else if (lpfc_is_link_up(phba)) {
1531 if (phba->fc_topology == TOPOLOGY_LOOP) { 1742 if (phba->fc_topology == TOPOLOGY_LOOP) {
1532 if (vport->fc_flag & FC_PUBLIC_LOOP) 1743 if (vport->fc_flag & FC_PUBLIC_LOOP)
1533 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 1744 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
@@ -1563,6 +1774,7 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
1563 break; 1774 break;
1564 case LPFC_LINK_UP: 1775 case LPFC_LINK_UP:
1565 case LPFC_CLEAR_LA: 1776 case LPFC_CLEAR_LA:
1777 case LPFC_HBA_READY:
1566 /* Links up, beyond this port_type reports state */ 1778 /* Links up, beyond this port_type reports state */
1567 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 1779 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1568 break; 1780 break;
@@ -1644,13 +1856,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
1644 unsigned long seconds; 1856 unsigned long seconds;
1645 int rc = 0; 1857 int rc = 0;
1646 1858
1647 /* prevent udev from issuing mailbox commands 1859 /*
1648 * until the port is configured. 1860 * prevent udev from issuing mailbox commands until the port is
1649 */ 1861 * configured.
1862 */
1650 if (phba->link_state < LPFC_LINK_DOWN || 1863 if (phba->link_state < LPFC_LINK_DOWN ||
1651 !phba->mbox_mem_pool || 1864 !phba->mbox_mem_pool ||
1652 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) 1865 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
1653 return NULL; 1866 return NULL;
1654 1867
1655 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) 1868 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1656 return NULL; 1869 return NULL;
@@ -1664,6 +1877,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
1664 pmb->mbxCommand = MBX_READ_STATUS; 1877 pmb->mbxCommand = MBX_READ_STATUS;
1665 pmb->mbxOwner = OWN_HOST; 1878 pmb->mbxOwner = OWN_HOST;
1666 pmboxq->context1 = NULL; 1879 pmboxq->context1 = NULL;
1880 pmboxq->vport = vport;
1667 1881
1668 if ((vport->fc_flag & FC_OFFLINE_MODE) || 1882 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1669 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 1883 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
@@ -1690,6 +1904,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
1690 pmb->mbxCommand = MBX_READ_LNK_STAT; 1904 pmb->mbxCommand = MBX_READ_LNK_STAT;
1691 pmb->mbxOwner = OWN_HOST; 1905 pmb->mbxOwner = OWN_HOST;
1692 pmboxq->context1 = NULL; 1906 pmboxq->context1 = NULL;
1907 pmboxq->vport = vport;
1693 1908
1694 if ((vport->fc_flag & FC_OFFLINE_MODE) || 1909 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1695 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 1910 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
@@ -1701,7 +1916,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
1701 if (rc == MBX_TIMEOUT) 1916 if (rc == MBX_TIMEOUT)
1702 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1917 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1703 else 1918 else
1704 mempool_free( pmboxq, phba->mbox_mem_pool); 1919 mempool_free(pmboxq, phba->mbox_mem_pool);
1705 return NULL; 1920 return NULL;
1706 } 1921 }
1707 1922
@@ -1769,6 +1984,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1769 pmb->mbxOwner = OWN_HOST; 1984 pmb->mbxOwner = OWN_HOST;
1770 pmb->un.varWords[0] = 0x1; /* reset request */ 1985 pmb->un.varWords[0] = 0x1; /* reset request */
1771 pmboxq->context1 = NULL; 1986 pmboxq->context1 = NULL;
1987 pmboxq->vport = vport;
1772 1988
1773 if ((vport->fc_flag & FC_OFFLINE_MODE) || 1989 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1774 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 1990 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
@@ -1788,6 +2004,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1788 pmb->mbxCommand = MBX_READ_LNK_STAT; 2004 pmb->mbxCommand = MBX_READ_LNK_STAT;
1789 pmb->mbxOwner = OWN_HOST; 2005 pmb->mbxOwner = OWN_HOST;
1790 pmboxq->context1 = NULL; 2006 pmboxq->context1 = NULL;
2007 pmboxq->vport = vport;
1791 2008
1792 if ((vport->fc_flag & FC_OFFLINE_MODE) || 2009 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1793 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 2010 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
@@ -1950,6 +2167,69 @@ struct fc_function_template lpfc_transport_functions = {
1950 .issue_fc_host_lip = lpfc_issue_lip, 2167 .issue_fc_host_lip = lpfc_issue_lip,
1951 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, 2168 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
1952 .terminate_rport_io = lpfc_terminate_rport_io, 2169 .terminate_rport_io = lpfc_terminate_rport_io,
2170
2171 .vport_create = lpfc_vport_create,
2172 .vport_delete = lpfc_vport_delete,
2173 .dd_fcvport_size = sizeof(struct lpfc_vport *),
2174};
2175
2176struct fc_function_template lpfc_vport_transport_functions = {
2177 /* fixed attributes the driver supports */
2178 .show_host_node_name = 1,
2179 .show_host_port_name = 1,
2180 .show_host_supported_classes = 1,
2181 .show_host_supported_fc4s = 1,
2182 .show_host_supported_speeds = 1,
2183 .show_host_maxframe_size = 1,
2184
2185 /* dynamic attributes the driver supports */
2186 .get_host_port_id = lpfc_get_host_port_id,
2187 .show_host_port_id = 1,
2188
2189 .get_host_port_type = lpfc_get_host_port_type,
2190 .show_host_port_type = 1,
2191
2192 .get_host_port_state = lpfc_get_host_port_state,
2193 .show_host_port_state = 1,
2194
2195 /* active_fc4s is shown but doesn't change (thus no get function) */
2196 .show_host_active_fc4s = 1,
2197
2198 .get_host_speed = lpfc_get_host_speed,
2199 .show_host_speed = 1,
2200
2201 .get_host_fabric_name = lpfc_get_host_fabric_name,
2202 .show_host_fabric_name = 1,
2203
2204 /*
2205 * The LPFC driver treats linkdown handling as target loss events
2206 * so there are no sysfs handlers for link_down_tmo.
2207 */
2208
2209 .get_fc_host_stats = lpfc_get_stats,
2210 .reset_fc_host_stats = lpfc_reset_stats,
2211
2212 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
2213 .show_rport_maxframe_size = 1,
2214 .show_rport_supported_classes = 1,
2215
2216 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
2217 .show_rport_dev_loss_tmo = 1,
2218
2219 .get_starget_port_id = lpfc_get_starget_port_id,
2220 .show_starget_port_id = 1,
2221
2222 .get_starget_node_name = lpfc_get_starget_node_name,
2223 .show_starget_node_name = 1,
2224
2225 .get_starget_port_name = lpfc_get_starget_port_name,
2226 .show_starget_port_name = 1,
2227
2228 .issue_fc_host_lip = lpfc_issue_lip,
2229 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
2230 .terminate_rport_io = lpfc_terminate_rport_io,
2231
2232 .vport_disable = lpfc_vport_disable,
1953}; 2233};
1954 2234
1955void 2235void
@@ -1972,6 +2252,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1972 lpfc_discovery_threads_init(phba, lpfc_discovery_threads); 2252 lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
1973 lpfc_max_luns_init(phba, lpfc_max_luns); 2253 lpfc_max_luns_init(phba, lpfc_max_luns);
1974 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 2254 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
2255 lpfc_peer_port_login_init(phba, lpfc_peer_port_login);
2256 lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login);
1975 lpfc_use_msi_init(phba, lpfc_use_msi); 2257 lpfc_use_msi_init(phba, lpfc_use_msi);
1976 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); 2258 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
1977 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); 2259 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 776930727058..94e788199568 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -28,15 +28,18 @@ int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
28void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
29void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport); 29void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
30void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); 30void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
31int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); 31int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
32void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 32void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
33void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 33void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
34int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *, 34int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
35 uint32_t); 35 LPFC_MBOXQ_t *, uint32_t);
36void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); 36void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
37void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); 37void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
38void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
39void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
38void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 40void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
39 41
42void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
40int lpfc_linkdown(struct lpfc_hba *); 43int lpfc_linkdown(struct lpfc_hba *);
41void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 44void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
42 45
@@ -51,6 +54,10 @@ void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
51void lpfc_set_disctmo(struct lpfc_vport *); 54void lpfc_set_disctmo(struct lpfc_vport *);
52int lpfc_can_disctmo(struct lpfc_vport *); 55int lpfc_can_disctmo(struct lpfc_vport *);
53int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *); 56int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
57void lpfc_unreg_all_rpis(struct lpfc_vport *);
58void lpfc_unreg_default_rpis(struct lpfc_vport *);
59void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
60
54int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, 61int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
55 struct lpfc_iocbq *, struct lpfc_nodelist *); 62 struct lpfc_iocbq *, struct lpfc_nodelist *);
56void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t); 63void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
@@ -60,25 +67,33 @@ struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
60void lpfc_disc_list_loopmap(struct lpfc_vport *); 67void lpfc_disc_list_loopmap(struct lpfc_vport *);
61void lpfc_disc_start(struct lpfc_vport *); 68void lpfc_disc_start(struct lpfc_vport *);
62void lpfc_disc_flush_list(struct lpfc_vport *); 69void lpfc_disc_flush_list(struct lpfc_vport *);
70void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
63void lpfc_disc_timeout(unsigned long); 71void lpfc_disc_timeout(unsigned long);
64 72
65struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 73struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
66struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); 74struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
67 75
76void lpfc_worker_wake_up(struct lpfc_hba *);
68int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); 77int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
69int lpfc_do_work(void *); 78int lpfc_do_work(void *);
70int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *, 79int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
71 uint32_t); 80 uint32_t);
72 81
82void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
83 struct lpfc_nodelist *);
84void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
73int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 85int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
74 struct serv_parm *, uint32_t); 86 struct serv_parm *, uint32_t);
75int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp); 87int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
76int lpfc_els_abort_flogi(struct lpfc_hba *); 88int lpfc_els_abort_flogi(struct lpfc_hba *);
77int lpfc_initial_flogi(struct lpfc_vport *); 89int lpfc_initial_flogi(struct lpfc_vport *);
90int lpfc_initial_fdisc(struct lpfc_vport *);
91int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
78int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); 92int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
79int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 93int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
80int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 94int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
81int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); 95int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
96int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
82int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); 97int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
83int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 98int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
84int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, 99int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -95,7 +110,7 @@ void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
95void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 110void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
96 struct lpfc_iocbq *); 111 struct lpfc_iocbq *);
97int lpfc_els_handle_rscn(struct lpfc_vport *); 112int lpfc_els_handle_rscn(struct lpfc_vport *);
98int lpfc_els_flush_rscn(struct lpfc_vport *); 113void lpfc_els_flush_rscn(struct lpfc_vport *);
99int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t); 114int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
100void lpfc_els_flush_cmd(struct lpfc_vport *); 115void lpfc_els_flush_cmd(struct lpfc_vport *);
101int lpfc_els_disc_adisc(struct lpfc_vport *); 116int lpfc_els_disc_adisc(struct lpfc_vport *);
@@ -105,7 +120,7 @@ void lpfc_els_timeout_handler(struct lpfc_vport *);
105 120
106void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 121void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
107 struct lpfc_iocbq *); 122 struct lpfc_iocbq *);
108int lpfc_ns_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); 123int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
109int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); 124int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
110void lpfc_fdmi_tmo(unsigned long); 125void lpfc_fdmi_tmo(unsigned long);
111void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport); 126void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
@@ -136,6 +151,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
136void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 151void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
137void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 152void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
138LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 153LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
154void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
139int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 155int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
140 156
141void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t , 157void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t ,
@@ -144,6 +160,7 @@ struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
144 160
145int lpfc_mem_alloc(struct lpfc_hba *); 161int lpfc_mem_alloc(struct lpfc_hba *);
146void lpfc_mem_free(struct lpfc_hba *); 162void lpfc_mem_free(struct lpfc_hba *);
163void lpfc_stop_vport_timers(struct lpfc_vport *);
147 164
148void lpfc_poll_timeout(unsigned long ptr); 165void lpfc_poll_timeout(unsigned long ptr);
149void lpfc_poll_start_timer(struct lpfc_hba * phba); 166void lpfc_poll_start_timer(struct lpfc_hba * phba);
@@ -176,11 +193,10 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
176struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, 193struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
177 struct lpfc_sli_ring *, 194 struct lpfc_sli_ring *,
178 dma_addr_t); 195 dma_addr_t);
179int lpfc_sli_hbqbuf_fill_hbq(struct lpfc_hba *); 196int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
180void lpfc_sli_hbqbuf_free(struct lpfc_hba *, void *, dma_addr_t); 197int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
181void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *); 198void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
182struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t); 199struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
183void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
184int lpfc_sli_hbq_size(void); 200int lpfc_sli_hbq_size(void);
185int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, 201int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
186 struct lpfc_iocbq *); 202 struct lpfc_iocbq *);
@@ -192,12 +208,15 @@ int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
192void lpfc_mbox_timeout(unsigned long); 208void lpfc_mbox_timeout(unsigned long);
193void lpfc_mbox_timeout_handler(struct lpfc_hba *); 209void lpfc_mbox_timeout_handler(struct lpfc_hba *);
194 210
211struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
212 void *);
213struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
195struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); 214struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
196struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, 215struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
197 struct lpfc_name *); 216 struct lpfc_name *);
198 217
199int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 218int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
200 uint32_t timeout); 219 uint32_t timeout);
201 220
202int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 221int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
203 struct lpfc_sli_ring * pring, 222 struct lpfc_sli_ring * pring,
@@ -210,11 +229,13 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
210 229
211void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *); 230void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *);
212void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t); 231void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t);
232void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
213 233
214void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *); 234void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
215void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); 235void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
216void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); 236void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
217 237
238void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
218/* Function prototypes. */ 239/* Function prototypes. */
219const char* lpfc_info(struct Scsi_Host *); 240const char* lpfc_info(struct Scsi_Host *);
220void lpfc_scan_start(struct Scsi_Host *); 241void lpfc_scan_start(struct Scsi_Host *);
@@ -226,14 +247,34 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
226extern struct class_device_attribute *lpfc_hba_attrs[]; 247extern struct class_device_attribute *lpfc_hba_attrs[];
227extern struct scsi_host_template lpfc_template; 248extern struct scsi_host_template lpfc_template;
228extern struct fc_function_template lpfc_transport_functions; 249extern struct fc_function_template lpfc_transport_functions;
250extern struct fc_function_template lpfc_vport_transport_functions;
229extern int lpfc_sli_mode; 251extern int lpfc_sli_mode;
252extern int lpfc_npiv_enable;
230 253
231void lpfc_get_hba_sym_node_name(struct lpfc_hba *phba, uint8_t *symbp); 254int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
232void lpfc_terminate_rport_io(struct fc_rport *); 255void lpfc_terminate_rport_io(struct fc_rport *);
233void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); 256void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
234 257
235struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int); 258struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *);
236void lpfc_post_hba_setup_vport_init(struct lpfc_vport *); 259int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
260void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
237void destroy_port(struct lpfc_vport *); 261void destroy_port(struct lpfc_vport *);
262int lpfc_get_instance(void);
263void lpfc_host_attrib_init(struct Scsi_Host *);
264
265/* Interface exported by fabric iocb scheduler */
266int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
267void lpfc_fabric_abort_vport(struct lpfc_vport *);
268void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
269void lpfc_fabric_abort_hba(struct lpfc_hba *);
270void lpfc_fabric_abort_flogi(struct lpfc_hba *);
271void lpfc_fabric_block_timeout(unsigned long);
272void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
273void lpfc_adjust_queue_depth(struct lpfc_hba *);
274void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
275void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
238 276
239#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 277#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
278#define HBA_EVENT_RSCN 5
279#define HBA_EVENT_LINK_UP 2
280#define HBA_EVENT_LINK_DOWN 3
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index e8ed5d7ccf9f..5584f395314c 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -40,6 +40,7 @@
40#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h" 41#include "lpfc_crtn.h"
42#include "lpfc_version.h" 42#include "lpfc_version.h"
43#include "lpfc_vport.h"
43 44
44#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver 45#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
45 * incapable of reporting */ 46 * incapable of reporting */
@@ -74,15 +75,13 @@ lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
74 __FUNCTION__, __LINE__, 75 __FUNCTION__, __LINE__,
75 piocbq, mp, size, 76 piocbq, mp, size,
76 piocbq->iocb.ulpStatus); 77 piocbq->iocb.ulpStatus);
78
77} 79}
78 80
79static void 81static void
80lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 82lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
81 struct hbq_dmabuf *sp, uint32_t size) 83 struct lpfc_dmabuf *mp, uint32_t size)
82{ 84{
83 struct lpfc_dmabuf *mp = NULL;
84
85 mp = sp ? &sp->dbuf : NULL;
86 if (!mp) { 85 if (!mp) {
87 printk(KERN_ERR "%s (%d): Unsolited CT, no " 86 printk(KERN_ERR "%s (%d): Unsolited CT, no "
88 "HBQ buffer, piocbq = %p, status = x%x\n", 87 "HBQ buffer, piocbq = %p, status = x%x\n",
@@ -102,21 +101,26 @@ void
102lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 101lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
103 struct lpfc_iocbq *piocbq) 102 struct lpfc_iocbq *piocbq)
104{ 103{
104
105 struct lpfc_dmabuf *mp = NULL; 105 struct lpfc_dmabuf *mp = NULL;
106 struct hbq_dmabuf *sp = NULL;
107 IOCB_t *icmd = &piocbq->iocb; 106 IOCB_t *icmd = &piocbq->iocb;
108 int i; 107 int i;
109 struct lpfc_iocbq *iocbq; 108 struct lpfc_iocbq *iocbq;
110 dma_addr_t paddr; 109 dma_addr_t paddr;
111 uint32_t size; 110 uint32_t size;
111 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
112 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
113
114 piocbq->context2 = NULL;
115 piocbq->context3 = NULL;
112 116
113 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && 117 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
114 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { 118 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
119 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
120 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
115 /* Not enough posted buffers; Try posting more buffers */ 121 /* Not enough posted buffers; Try posting more buffers */
116 phba->fc_stat.NoRcvBuf++; 122 phba->fc_stat.NoRcvBuf++;
117 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 123 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
118 lpfc_sli_hbqbuf_fill_hbq(phba);
119 else
120 lpfc_post_buffer(phba, pring, 0, 1); 124 lpfc_post_buffer(phba, pring, 0, 1);
121 return; 125 return;
122 } 126 }
@@ -139,23 +143,14 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
139 } 143 }
140 144
141 size = icmd->un.cont64[0].tus.f.bdeSize; 145 size = icmd->un.cont64[0].tus.f.bdeSize;
142 sp = lpfc_sli_hbqbuf_find(phba, icmd->un.ulpWord[3]); 146 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size);
143 if (sp) 147 lpfc_in_buf_free(phba, bdeBuf1);
144 phba->hbq_buff_count--;
145 lpfc_ct_ignore_hbq_buffer(phba, iocbq, sp, size);
146 lpfc_sli_free_hbq(phba, sp);
147 if (icmd->ulpBdeCount == 2) { 148 if (icmd->ulpBdeCount == 2) {
148 sp = lpfc_sli_hbqbuf_find(phba, 149 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2,
149 icmd->un.ulpWord[15]);
150 if (sp)
151 phba->hbq_buff_count--;
152 lpfc_ct_ignore_hbq_buffer(phba, iocbq, sp,
153 size); 150 size);
154 lpfc_sli_free_hbq(phba, sp); 151 lpfc_in_buf_free(phba, bdeBuf2);
155 } 152 }
156
157 } 153 }
158 lpfc_sli_hbqbuf_fill_hbq(phba);
159 } else { 154 } else {
160 struct lpfc_iocbq *next; 155 struct lpfc_iocbq *next;
161 156
@@ -176,8 +171,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
176 paddr); 171 paddr);
177 size = icmd->un.cont64[i].tus.f.bdeSize; 172 size = icmd->un.cont64[i].tus.f.bdeSize;
178 lpfc_ct_unsol_buffer(phba, piocbq, mp, size); 173 lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
179 lpfc_mbuf_free(phba, mp->virt, mp->phys); 174 lpfc_in_buf_free(phba, mp);
180 kfree(mp);
181 } 175 }
182 list_del(&iocbq->list); 176 list_del(&iocbq->list);
183 lpfc_sli_release_iocbq(phba, iocbq); 177 lpfc_sli_release_iocbq(phba, iocbq);
@@ -222,7 +216,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
222 216
223 INIT_LIST_HEAD(&mp->list); 217 INIT_LIST_HEAD(&mp->list);
224 218
225 if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT)) 219 if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
220 cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
226 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); 221 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
227 else 222 else
228 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys)); 223 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
@@ -242,8 +237,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
242 237
243 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 238 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
244 /* build buffer ptr list for IOCB */ 239 /* build buffer ptr list for IOCB */
245 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 240 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
246 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 241 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
247 bpl->tus.f.bdeSize = (uint16_t) cnt; 242 bpl->tus.f.bdeSize = (uint16_t) cnt;
248 bpl->tus.w = le32_to_cpu(bpl->tus.w); 243 bpl->tus.w = le32_to_cpu(bpl->tus.w);
249 bpl++; 244 bpl++;
@@ -262,13 +257,14 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
262 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 257 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
263 struct lpfc_iocbq *), 258 struct lpfc_iocbq *),
264 struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry, 259 struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
265 uint32_t tmo) 260 uint32_t tmo, uint8_t retry)
266{ 261{
267 struct lpfc_hba *phba = vport->phba; 262 struct lpfc_hba *phba = vport->phba;
268 struct lpfc_sli *psli = &phba->sli; 263 struct lpfc_sli *psli = &phba->sli;
269 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 264 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
270 IOCB_t *icmd; 265 IOCB_t *icmd;
271 struct lpfc_iocbq *geniocb; 266 struct lpfc_iocbq *geniocb;
267 int rc;
272 268
273 /* Allocate buffer for command iocb */ 269 /* Allocate buffer for command iocb */
274 geniocb = lpfc_sli_get_iocbq(phba); 270 geniocb = lpfc_sli_get_iocbq(phba);
@@ -311,15 +307,25 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
311 icmd->ulpClass = CLASS3; 307 icmd->ulpClass = CLASS3;
312 icmd->ulpContext = ndlp->nlp_rpi; 308 icmd->ulpContext = ndlp->nlp_rpi;
313 309
310 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
311 /* For GEN_REQUEST64_CR, use the RPI */
312 icmd->ulpCt_h = 0;
313 icmd->ulpCt_l = 0;
314 }
315
314 /* Issue GEN REQ IOCB for NPORT <did> */ 316 /* Issue GEN REQ IOCB for NPORT <did> */
315 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 317 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
316 "%d:0119 Issue GEN REQ IOCB for NPORT x%x " 318 "%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x "
317 "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5], 319 "Data: x%x x%x\n", phba->brd_no, vport->vpi,
318 icmd->ulpIoTag, vport->port_state); 320 ndlp->nlp_DID, icmd->ulpIoTag,
321 vport->port_state);
319 geniocb->iocb_cmpl = cmpl; 322 geniocb->iocb_cmpl = cmpl;
320 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 323 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
321 geniocb->vport = vport; 324 geniocb->vport = vport;
322 if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) { 325 geniocb->retry = retry;
326 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
327
328 if (rc == IOCB_ERROR) {
323 lpfc_sli_release_iocbq(phba, geniocb); 329 lpfc_sli_release_iocbq(phba, geniocb);
324 return 1; 330 return 1;
325 } 331 }
@@ -332,7 +338,7 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
332 struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, 338 struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
333 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 339 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
334 struct lpfc_iocbq *), 340 struct lpfc_iocbq *),
335 uint32_t rsp_size) 341 uint32_t rsp_size, uint8_t retry)
336{ 342{
337 struct lpfc_hba *phba = vport->phba; 343 struct lpfc_hba *phba = vport->phba;
338 struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; 344 struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
@@ -349,7 +355,7 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
349 return -ENOMEM; 355 return -ENOMEM;
350 356
351 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, 357 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
352 cnt+1, 0); 358 cnt+1, 0, retry);
353 if (status) { 359 if (status) {
354 lpfc_free_ct_rsp(phba, outmp); 360 lpfc_free_ct_rsp(phba, outmp);
355 return -ENOMEM; 361 return -ENOMEM;
@@ -357,10 +363,23 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
357 return 0; 363 return 0;
358} 364}
359 365
366static struct lpfc_vport *
367lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
368
369 struct lpfc_vport *vport_curr;
370
371 list_for_each_entry(vport_curr, &phba->port_list, listentry) {
372 if ((vport_curr->fc_myDID) &&
373 (vport_curr->fc_myDID == did))
374 return vport_curr;
375 }
376
377 return NULL;
378}
379
360static int 380static int
361lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) 381lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
362{ 382{
363 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
364 struct lpfc_hba *phba = vport->phba; 383 struct lpfc_hba *phba = vport->phba;
365 struct lpfc_sli_ct_request *Response = 384 struct lpfc_sli_ct_request *Response =
366 (struct lpfc_sli_ct_request *) mp->virt; 385 (struct lpfc_sli_ct_request *) mp->virt;
@@ -372,6 +391,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
372 struct list_head head; 391 struct list_head head;
373 392
374 lpfc_set_disctmo(vport); 393 lpfc_set_disctmo(vport);
394 vport->num_disc_nodes = 0;
375 395
376 396
377 list_add_tail(&head, &mp->list); 397 list_add_tail(&head, &mp->list);
@@ -392,25 +412,64 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
392 /* Get next DID from NameServer List */ 412 /* Get next DID from NameServer List */
393 CTentry = *ctptr++; 413 CTentry = *ctptr++;
394 Did = ((be32_to_cpu(CTentry)) & Mask_DID); 414 Did = ((be32_to_cpu(CTentry)) & Mask_DID);
415
395 ndlp = NULL; 416 ndlp = NULL;
396 /* Check for rscn processing or not */ 417
397 if (Did != vport->fc_myDID) 418 /*
398 ndlp = lpfc_setup_disc_node(vport, Did); 419 * Check for rscn processing or not
399 if (ndlp) { 420 * To conserve rpi's, filter out addresses for other
400 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 421 * vports on the same physical HBAs.
401 "%d:0238 Process x%x NameServer" 422 */
402 " Rsp Data: x%x x%x x%x\n", 423 if ((Did != vport->fc_myDID) &&
403 phba->brd_no, 424 ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
404 Did, ndlp->nlp_flag, 425 phba->cfg_peer_port_login)) {
405 vport->fc_flag, 426 if ((vport->port_type != LPFC_NPIV_PORT) ||
406 vport->fc_rscn_id_cnt); 427 (vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
407 } else { 428 (!phba->cfg_vport_restrict_login)) {
408 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 429 ndlp = lpfc_setup_disc_node(vport, Did);
409 "%d:0239 Skip x%x NameServer " 430 if (ndlp) {
410 "Rsp Data: x%x x%x x%x\n", 431 lpfc_printf_log(phba, KERN_INFO,
411 phba->brd_no, 432 LOG_DISCOVERY,
412 Did, Size, vport->fc_flag, 433 "%d (%d):0238 Process "
413 vport->fc_rscn_id_cnt); 434 "x%x NameServer Rsp"
435 "Data: x%x x%x x%x\n",
436 phba->brd_no,
437 vport->vpi, Did,
438 ndlp->nlp_flag,
439 vport->fc_flag,
440 vport->fc_rscn_id_cnt);
441 } else {
442 lpfc_printf_log(phba, KERN_INFO,
443 LOG_DISCOVERY,
444 "%d (%d):0239 Skip x%x "
445 "NameServer Rsp Data: "
446 "x%x x%x\n",
447 phba->brd_no,
448 vport->vpi, Did,
449 vport->fc_flag,
450 vport->fc_rscn_id_cnt);
451 }
452
453 } else {
454 if (!(vport->fc_flag & FC_RSCN_MODE) ||
455 (lpfc_rscn_payload_check(vport, Did))) {
456 if (lpfc_ns_cmd(vport,
457 SLI_CTNS_GFF_ID,
458 0, Did) == 0)
459 vport->num_disc_nodes++;
460 }
461 else {
462 lpfc_printf_log(phba, KERN_INFO,
463 LOG_DISCOVERY,
464 "%d (%d):0245 Skip x%x "
465 "NameServer Rsp Data: "
466 "x%x x%x\n",
467 phba->brd_no,
468 vport->vpi, Did,
469 vport->fc_flag,
470 vport->fc_rscn_id_cnt);
471 }
472 }
414 } 473 }
415 if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY))) 474 if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
416 goto nsout1; 475 goto nsout1;
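
The reworked GID_FT walk above decides, per DID, whether to skip the entry, log in to it directly, or first issue GFF_ID so a restricted-login NPIV port does not spend an RPI on another initiator. Below is a compact standalone model of that decision; the flag parameters are placeholders standing in for cfg_peer_port_login, cfg_vport_restrict_login, FC_RFF_NOT_SUPPORTED, FC_RSCN_MODE, lpfc_find_vport_by_did() and lpfc_rscn_payload_check(), not driver fields.

    #include <stdbool.h>
    #include <stdio.h>

    enum ns_action { NS_SKIP, NS_DISC_NODE, NS_SEND_GFF_ID };

    /* Model of the per-DID decision in lpfc_ns_rsp().  "owned_by_sibling_vport"
     * stands in for lpfc_find_vport_by_did() != NULL; the remaining bools mirror
     * the config options and flags named in the lead-in above.
     */
    static enum ns_action classify_did(bool is_my_did, bool owned_by_sibling_vport,
                                       bool peer_port_login, bool npiv_port,
                                       bool rff_not_supported, bool restrict_login,
                                       bool rscn_mode, bool rscn_payload_match)
    {
        if (is_my_did || (owned_by_sibling_vport && !peer_port_login))
            return NS_SKIP;              /* conserve RPIs: not for this vport */

        if (!npiv_port || rff_not_supported || !restrict_login)
            return NS_DISC_NODE;         /* log in to the port directly */

        if (!rscn_mode || rscn_payload_match)
            return NS_SEND_GFF_ID;       /* query FC-4 features before login */

        return NS_SKIP;
    }

    int main(void)
    {
        /* NPIV vport with restricted login, DID matches the RSCN payload:
         * the model returns NS_SEND_GFF_ID (2), i.e. probe before login. */
        printf("%d\n", classify_did(false, false, false, true,
                                    false, true, false, true));
        return 0;
    }
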
@@ -422,34 +481,19 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
422 481
423nsout1: 482nsout1:
424 list_del(&head); 483 list_del(&head);
425
426 /*
427 * The driver has cycled through all Nports in the RSCN payload.
428 * Complete the handling by cleaning up and marking the
429 * current driver state.
430 */
431 if (vport->port_state == LPFC_VPORT_READY) {
432 lpfc_els_flush_rscn(vport);
433 spin_lock_irq(shost->host_lock);
434 vport->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
435 spin_unlock_irq(shost->host_lock);
436 }
437 return 0; 484 return 0;
438} 485}
439 486
440
441
442
443static void 487static void
444lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 488lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
445 struct lpfc_iocbq *rspiocb) 489 struct lpfc_iocbq *rspiocb)
446{ 490{
447 struct lpfc_vport *vport = cmdiocb->vport; 491 struct lpfc_vport *vport = cmdiocb->vport;
492 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
448 IOCB_t *irsp; 493 IOCB_t *irsp;
449 struct lpfc_dmabuf *bmp; 494 struct lpfc_dmabuf *bmp;
450 struct lpfc_dmabuf *inp; 495 struct lpfc_dmabuf *inp;
451 struct lpfc_dmabuf *outp; 496 struct lpfc_dmabuf *outp;
452 struct lpfc_nodelist *ndlp;
453 struct lpfc_sli_ct_request *CTrsp; 497 struct lpfc_sli_ct_request *CTrsp;
454 int rc; 498 int rc;
455 499
@@ -460,33 +504,41 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
460 outp = (struct lpfc_dmabuf *) cmdiocb->context2; 504 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
461 bmp = (struct lpfc_dmabuf *) cmdiocb->context3; 505 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
462 506
507 /* Don't bother processing response if vport is being torn down. */
508 if (vport->load_flag & FC_UNLOADING)
509 goto out;
510
463 irsp = &rspiocb->iocb; 511 irsp = &rspiocb->iocb;
464 if (irsp->ulpStatus) { 512 if (irsp->ulpStatus) {
465 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 513 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
466 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) || 514 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
467 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) 515 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
468 goto out; 516 goto err1;
469 517
470 /* Check for retry */ 518 /* Check for retry */
471 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 519 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
472 vport->fc_ns_retry++; 520 vport->fc_ns_retry++;
473 /* CT command is being retried */ 521 /* CT command is being retried */
474 ndlp = lpfc_findnode_did(vport, NameServer_DID); 522 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
475 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 523 vport->fc_ns_retry, 0);
476 rc = lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT); 524 if (rc == 0)
477 if (rc == 0) 525 goto out;
478 goto out; 526 }
479 } 527err1:
480 } 528 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
529 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
530 "%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n",
531 phba->brd_no, vport->vpi, irsp->ulpStatus,
532 vport->fc_ns_retry);
481 } else { 533 } else {
482 /* Good status, continue checking */ 534 /* Good status, continue checking */
483 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 535 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
484 if (CTrsp->CommandResponse.bits.CmdRsp == 536 if (CTrsp->CommandResponse.bits.CmdRsp ==
485 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { 537 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
486 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 538 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
487 "%d:0208 NameServer Rsp " 539 "%d (%d):0208 NameServer Rsp "
488 "Data: x%x\n", 540 "Data: x%x\n",
489 phba->brd_no, 541 phba->brd_no, vport->vpi,
490 vport->fc_flag); 542 vport->fc_flag);
491 lpfc_ns_rsp(vport, outp, 543 lpfc_ns_rsp(vport, outp,
492 (uint32_t) (irsp->un.genreq64.bdl.bdeSize)); 544 (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
@@ -494,21 +546,19 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
494 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 546 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
495 /* NameServer Rsp Error */ 547 /* NameServer Rsp Error */
496 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 548 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
497 "%d:0240 NameServer Rsp Error " 549 "%d (%d):0240 NameServer Rsp Error "
498 "Data: x%x x%x x%x x%x\n", 550 "Data: x%x x%x x%x x%x\n",
499 phba->brd_no, 551 phba->brd_no, vport->vpi,
500 CTrsp->CommandResponse.bits.CmdRsp, 552 CTrsp->CommandResponse.bits.CmdRsp,
501 (uint32_t) CTrsp->ReasonCode, 553 (uint32_t) CTrsp->ReasonCode,
502 (uint32_t) CTrsp->Explanation, 554 (uint32_t) CTrsp->Explanation,
503 vport->fc_flag); 555 vport->fc_flag);
504 } else { 556 } else {
505 /* NameServer Rsp Error */ 557 /* NameServer Rsp Error */
506 lpfc_printf_log(phba, 558 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
507 KERN_INFO, 559 "%d (%d):0241 NameServer Rsp Error "
508 LOG_DISCOVERY,
509 "%d:0241 NameServer Rsp Error "
510 "Data: x%x x%x x%x x%x\n", 560 "Data: x%x x%x x%x x%x\n",
511 phba->brd_no, 561 phba->brd_no, vport->vpi,
512 CTrsp->CommandResponse.bits.CmdRsp, 562 CTrsp->CommandResponse.bits.CmdRsp,
513 (uint32_t) CTrsp->ReasonCode, 563 (uint32_t) CTrsp->ReasonCode,
514 (uint32_t) CTrsp->Explanation, 564 (uint32_t) CTrsp->Explanation,
@@ -516,8 +566,111 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
516 } 566 }
517 } 567 }
518 /* Link up / RSCN discovery */ 568 /* Link up / RSCN discovery */
519 lpfc_disc_start(vport); 569 if (vport->num_disc_nodes == 0) {
570 /*
571 * The driver has cycled through all Nports in the RSCN payload.
572 * Complete the handling by cleaning up and marking the
573 * current driver state.
574 */
575 if (vport->port_state >= LPFC_DISC_AUTH) {
576 if (vport->fc_flag & FC_RSCN_MODE) {
577 lpfc_els_flush_rscn(vport);
578 spin_lock_irq(shost->host_lock);
579 vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
580 spin_unlock_irq(shost->host_lock);
581 }
582 else
583 lpfc_els_flush_rscn(vport);
584 }
585
586 lpfc_disc_start(vport);
587 }
588out:
589 lpfc_free_ct_rsp(phba, outp);
590 lpfc_mbuf_free(phba, inp->virt, inp->phys);
591 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
592 kfree(inp);
593 kfree(bmp);
594 lpfc_sli_release_iocbq(phba, cmdiocb);
595 return;
596}
597
598void
599lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
600 struct lpfc_iocbq *rspiocb)
601{
602 struct lpfc_vport *vport = cmdiocb->vport;
603 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
604 IOCB_t *irsp = &rspiocb->iocb;
605 struct lpfc_dmabuf *bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
606 struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
607 struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
608 struct lpfc_sli_ct_request *CTrsp;
609 int did;
610 uint8_t fbits;
611 struct lpfc_nodelist *ndlp;
612
613 did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
614 did = be32_to_cpu(did);
615
616 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
617 /* Good status, continue checking */
618 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
619 fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
620
621 if (CTrsp->CommandResponse.bits.CmdRsp ==
622 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
623 if ((fbits & FC4_FEATURE_INIT) &&
624 !(fbits & FC4_FEATURE_TARGET)) {
625 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
626 "%d (%d):0245 Skip x%x GFF "
627 "NameServer Rsp Data: (init) "
628 "x%x x%x\n", phba->brd_no,
629 vport->vpi, did, fbits,
630 vport->fc_rscn_id_cnt);
631 goto out;
632 }
633 }
634 }
635 /* This is a target port, unregistered port, or the GFF_ID failed */
636 ndlp = lpfc_setup_disc_node(vport, did);
637 if (ndlp) {
638 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
639 "%d (%d):0242 Process x%x GFF "
640 "NameServer Rsp Data: x%x x%x x%x\n",
641 phba->brd_no, vport->vpi,
642 did, ndlp->nlp_flag, vport->fc_flag,
643 vport->fc_rscn_id_cnt);
644 } else {
645 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
646 "%d (%d):0243 Skip x%x GFF "
647 "NameServer Rsp Data: x%x x%x\n",
648 phba->brd_no, vport->vpi, did,
649 vport->fc_flag, vport->fc_rscn_id_cnt);
650 }
520out: 651out:
652 /* Link up / RSCN discovery */
653 if (vport->num_disc_nodes)
654 vport->num_disc_nodes--;
655 if (vport->num_disc_nodes == 0) {
656 /*
657 * The driver has cycled through all Nports in the RSCN payload.
658 * Complete the handling by cleaning up and marking the
659 * current driver state.
660 */
661 if (vport->port_state >= LPFC_DISC_AUTH) {
662 if (vport->fc_flag & FC_RSCN_MODE) {
663 lpfc_els_flush_rscn(vport);
664 spin_lock_irq(shost->host_lock);
665 vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
666 spin_unlock_irq(shost->host_lock);
667 }
668 else
669 lpfc_els_flush_rscn(vport);
670 }
671 lpfc_disc_start(vport);
672 }
673
521 lpfc_free_ct_rsp(phba, outp); 674 lpfc_free_ct_rsp(phba, outp);
522 lpfc_mbuf_free(phba, inp->virt, inp->phys); 675 lpfc_mbuf_free(phba, inp->virt, inp->phys);
523 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 676 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
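
The new GFF_ID completion above keys off the FC-4 Features word returned by the name server: a port that reports itself as initiator-only is skipped, while targets, unregistered ports, and failed queries all go through lpfc_setup_disc_node(). A standalone sketch of that test follows; the two feature bit values are assumed from the usual GFF_ID encoding (bit 0 target, bit 1 initiator), not copied from lpfc_hw.h.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed GFF_ID FC-4 Features encoding; placeholder values for the
     * sketch only. */
    #define FC4_FEATURE_TARGET 0x01
    #define FC4_FEATURE_INIT   0x02

    /* Mirror of the skip test in lpfc_cmpl_ct_cmd_gff_id(): initiator-only
     * ports are not worth an rpi, anything else goes through discovery.
     * "gff_accepted" stands for a successful IOCB with an FS_ACC response. */
    static bool gff_should_skip(uint8_t fbits, bool gff_accepted)
    {
        return gff_accepted &&
               (fbits & FC4_FEATURE_INIT) &&
               !(fbits & FC4_FEATURE_TARGET);
    }

    int main(void)
    {
        printf("init only : skip=%d\n", gff_should_skip(FC4_FEATURE_INIT, true));
        printf("target    : skip=%d\n", gff_should_skip(FC4_FEATURE_TARGET, true));
        printf("no answer : skip=%d\n", gff_should_skip(0, false));
        return 0;
    }
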
@@ -527,15 +680,19 @@ out:
527 return; 680 return;
528} 681}
529 682
683
530static void 684static void
531lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 685lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
532 struct lpfc_iocbq *rspiocb) 686 struct lpfc_iocbq *rspiocb)
533{ 687{
688 struct lpfc_vport *vport = cmdiocb->vport;
534 struct lpfc_dmabuf *bmp; 689 struct lpfc_dmabuf *bmp;
535 struct lpfc_dmabuf *inp; 690 struct lpfc_dmabuf *inp;
536 struct lpfc_dmabuf *outp; 691 struct lpfc_dmabuf *outp;
537 IOCB_t *irsp; 692 IOCB_t *irsp;
538 struct lpfc_sli_ct_request *CTrsp; 693 struct lpfc_sli_ct_request *CTrsp;
694 int cmdcode, rc;
695 uint8_t retry;
539 696
540 /* we pass cmdiocb to state machine which needs rspiocb as well */ 697 /* we pass cmdiocb to state machine which needs rspiocb as well */
541 cmdiocb->context_un.rsp_iocb = rspiocb; 698 cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -545,16 +702,40 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
545 bmp = (struct lpfc_dmabuf *) cmdiocb->context3; 702 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
546 irsp = &rspiocb->iocb; 703 irsp = &rspiocb->iocb;
547 704
705 cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
706 CommandResponse.bits.CmdRsp);
548 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 707 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
549 708
550 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */ 709 /* NS request completes status <ulpStatus> CmdRsp <CmdRsp> */
551 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 710 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
552 "%d:0209 RFT request completes ulpStatus x%x " 711 "%d (%d):0209 NS request %x completes "
712 "ulpStatus x%x / x%x "
553 "CmdRsp x%x, Context x%x, Tag x%x\n", 713 "CmdRsp x%x, Context x%x, Tag x%x\n",
554 phba->brd_no, irsp->ulpStatus, 714 phba->brd_no, vport->vpi,
715 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4],
555 CTrsp->CommandResponse.bits.CmdRsp, 716 CTrsp->CommandResponse.bits.CmdRsp,
556 cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag); 717 cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
557 718
719 if (irsp->ulpStatus) {
720 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
721 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
722 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
723 goto out;
724
725 retry = cmdiocb->retry;
726 if (retry >= LPFC_MAX_NS_RETRY)
727 goto out;
728
729 retry++;
730 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
731 "%d (%d):0216 Retrying NS cmd %x\n",
732 phba->brd_no, vport->vpi, cmdcode);
733 rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
734 if (rc == 0)
735 goto out;
736 }
737
738out:
558 lpfc_free_ct_rsp(phba, outp); 739 lpfc_free_ct_rsp(phba, outp);
559 lpfc_mbuf_free(phba, inp->virt, inp->phys); 740 lpfc_mbuf_free(phba, inp->virt, inp->phys);
560 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 741 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -573,6 +754,14 @@ lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
573} 754}
574 755
575static void 756static void
757lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
758 struct lpfc_iocbq *rspiocb)
759{
760 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
761 return;
762}
763
764static void
576lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 765lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
577 struct lpfc_iocbq *rspiocb) 766 struct lpfc_iocbq *rspiocb)
578{ 767{
@@ -581,23 +770,54 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
581} 770}
582 771
583static void 772static void
584lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 773lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
585 struct lpfc_iocbq * rspiocb) 774 struct lpfc_iocbq *rspiocb)
586{ 775{
776 IOCB_t *irsp = &rspiocb->iocb;
777 struct lpfc_vport *vport = cmdiocb->vport;
778
779 if (irsp->ulpStatus != IOSTAT_SUCCESS)
780 vport->fc_flag |= FC_RFF_NOT_SUPPORTED;
781
587 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 782 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
588 return; 783 return;
589} 784}
590 785
591void 786int
592lpfc_get_hba_sym_node_name(struct lpfc_hba *phba, uint8_t *symbp) 787lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
788 size_t size)
789{
790 int n;
791 uint8_t *wwn = vport->phba->wwpn;
792
793 n = snprintf(symbol, size,
794 "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
795 wwn[0], wwn[1], wwn[2], wwn[3],
796 wwn[4], wwn[5], wwn[6], wwn[7]);
797
798 if (vport->port_type == LPFC_PHYSICAL_PORT)
799 return n;
800
801 if (n < size)
802 n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
803
804 if (n < size && vport->vname)
805 n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
806 return n;
807}
808
809int
810lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
811 size_t size)
593{ 812{
594 char fwrev[16]; 813 char fwrev[16];
814 int n;
595 815
596 lpfc_decode_firmware_rev(phba, fwrev, 0); 816 lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
597 817
598 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, 818 n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
599 fwrev, lpfc_release_version); 819 vport->phba->ModelName, fwrev, lpfc_release_version);
600 return; 820 return n;
601} 821}
602 822
603/* 823/*
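
lpfc_vport_symbolic_port_name() and lpfc_vport_symbolic_node_name() above replace the old sprintf-based helper with length-checked snprintf() accumulation and return the byte count so the callers can fill in the CT request's len field. Here is a minimal userspace sketch of the same pattern; the WWPN bytes, VPI and vport name are invented for the example, and "physical port" is modelled simply as vpi == 0 rather than by the driver's port_type field.

    #include <stdio.h>
    #include <stdint.h>

    /* Same accumulation pattern as lpfc_vport_symbolic_port_name(): append
     * only while the running length still fits, and return the total length
     * for the caller to record. */
    static int build_port_symname(char *buf, size_t size, const uint8_t wwpn[8],
                                  int vpi, const char *vname)
    {
        int n;

        n = snprintf(buf, size,
                     "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
                     wwpn[0], wwpn[1], wwpn[2], wwpn[3],
                     wwpn[4], wwpn[5], wwpn[6], wwpn[7]);

        if (vpi == 0)                  /* physical port: WWPN alone is enough */
            return n;

        if (n < (int)size)
            n += snprintf(buf + n, size - n, " VPort-%d", vpi);
        if (n < (int)size && vname)
            n += snprintf(buf + n, size - n, " VName-%s", vname);
        return n;
    }

    int main(void)
    {
        /* Example WWPN, VPI and name chosen for the sketch only. */
        const uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };
        char sym[64];

        int len = build_port_symname(sym, sizeof(sym), wwpn, 2, "backup");
        printf("%d: %s\n", len, sym);
        return 0;
    }
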
@@ -608,8 +828,10 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba *phba, uint8_t *symbp)
608 * LI_CTNS_RFT_ID 828 * LI_CTNS_RFT_ID
609 */ 829 */
610int 830int
611lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode) 831lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
832 uint8_t retry, uint32_t context)
612{ 833{
834 struct lpfc_nodelist * ndlp;
613 struct lpfc_hba *phba = vport->phba; 835 struct lpfc_hba *phba = vport->phba;
614 struct lpfc_dmabuf *mp, *bmp; 836 struct lpfc_dmabuf *mp, *bmp;
615 struct lpfc_sli_ct_request *CtReq; 837 struct lpfc_sli_ct_request *CtReq;
@@ -617,6 +839,11 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
617 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 839 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
618 struct lpfc_iocbq *) = NULL; 840 struct lpfc_iocbq *) = NULL;
619 uint32_t rsp_size = 1024; 841 uint32_t rsp_size = 1024;
842 size_t size;
843
844 ndlp = lpfc_findnode_did(vport, NameServer_DID);
845 if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
846 return 1;
620 847
621 /* fill in BDEs for command */ 848 /* fill in BDEs for command */
622 /* Allocate buffer for command payload */ 849 /* Allocate buffer for command payload */
@@ -640,24 +867,26 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
640 goto ns_cmd_free_bmp; 867 goto ns_cmd_free_bmp;
641 868
642 /* NameServer Req */ 869 /* NameServer Req */
 643 lpfc_printf_log(phba, 870 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,

644 KERN_INFO, 871 "%d (%d):0236 NameServer Req Data: x%x x%x x%x\n",
645 LOG_DISCOVERY, 872 phba->brd_no, vport->vpi, cmdcode, vport->fc_flag,
646 "%d:0236 NameServer Req Data: x%x x%x x%x\n",
647 phba->brd_no, cmdcode, vport->fc_flag,
648 vport->fc_rscn_id_cnt); 873 vport->fc_rscn_id_cnt);
649 874
650 bpl = (struct ulp_bde64 *) bmp->virt; 875 bpl = (struct ulp_bde64 *) bmp->virt;
651 memset(bpl, 0, sizeof(struct ulp_bde64)); 876 memset(bpl, 0, sizeof(struct ulp_bde64));
652 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 877 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
653 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 878 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
654 bpl->tus.f.bdeFlags = 0; 879 bpl->tus.f.bdeFlags = 0;
655 if (cmdcode == SLI_CTNS_GID_FT) 880 if (cmdcode == SLI_CTNS_GID_FT)
656 bpl->tus.f.bdeSize = GID_REQUEST_SZ; 881 bpl->tus.f.bdeSize = GID_REQUEST_SZ;
882 else if (cmdcode == SLI_CTNS_GFF_ID)
883 bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
657 else if (cmdcode == SLI_CTNS_RFT_ID) 884 else if (cmdcode == SLI_CTNS_RFT_ID)
658 bpl->tus.f.bdeSize = RFT_REQUEST_SZ; 885 bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
659 else if (cmdcode == SLI_CTNS_RNN_ID) 886 else if (cmdcode == SLI_CTNS_RNN_ID)
660 bpl->tus.f.bdeSize = RNN_REQUEST_SZ; 887 bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
888 else if (cmdcode == SLI_CTNS_RSPN_ID)
889 bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
661 else if (cmdcode == SLI_CTNS_RSNN_NN) 890 else if (cmdcode == SLI_CTNS_RSNN_NN)
662 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; 891 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
663 else if (cmdcode == SLI_CTNS_RFF_ID) 892 else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -678,13 +907,20 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
678 CtReq->CommandResponse.bits.CmdRsp = 907 CtReq->CommandResponse.bits.CmdRsp =
679 be16_to_cpu(SLI_CTNS_GID_FT); 908 be16_to_cpu(SLI_CTNS_GID_FT);
680 CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; 909 CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
681 if (vport->port_state < LPFC_VPORT_READY) 910 if (vport->port_state < LPFC_NS_QRY)
682 vport->port_state = LPFC_NS_QRY; 911 vport->port_state = LPFC_NS_QRY;
683 lpfc_set_disctmo(vport); 912 lpfc_set_disctmo(vport);
684 cmpl = lpfc_cmpl_ct_cmd_gid_ft; 913 cmpl = lpfc_cmpl_ct_cmd_gid_ft;
685 rsp_size = FC_MAX_NS_RSP; 914 rsp_size = FC_MAX_NS_RSP;
686 break; 915 break;
687 916
917 case SLI_CTNS_GFF_ID:
918 CtReq->CommandResponse.bits.CmdRsp =
919 be16_to_cpu(SLI_CTNS_GFF_ID);
920 CtReq->un.gff.PortId = be32_to_cpu(context);
921 cmpl = lpfc_cmpl_ct_cmd_gff_id;
922 break;
923
688 case SLI_CTNS_RFT_ID: 924 case SLI_CTNS_RFT_ID:
689 CtReq->CommandResponse.bits.CmdRsp = 925 CtReq->CommandResponse.bits.CmdRsp =
690 be16_to_cpu(SLI_CTNS_RFT_ID); 926 be16_to_cpu(SLI_CTNS_RFT_ID);
@@ -693,17 +929,6 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
693 cmpl = lpfc_cmpl_ct_cmd_rft_id; 929 cmpl = lpfc_cmpl_ct_cmd_rft_id;
694 break; 930 break;
695 931
696 case SLI_CTNS_RFF_ID:
697 CtReq->CommandResponse.bits.CmdRsp =
698 be16_to_cpu(SLI_CTNS_RFF_ID);
699 CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);
700 CtReq->un.rff.feature_res = 0;
701 CtReq->un.rff.feature_tgt = 0;
702 CtReq->un.rff.type_code = FC_FCP_DATA;
703 CtReq->un.rff.feature_init = 1;
704 cmpl = lpfc_cmpl_ct_cmd_rff_id;
705 break;
706
707 case SLI_CTNS_RNN_ID: 932 case SLI_CTNS_RNN_ID:
708 CtReq->CommandResponse.bits.CmdRsp = 933 CtReq->CommandResponse.bits.CmdRsp =
709 be16_to_cpu(SLI_CTNS_RNN_ID); 934 be16_to_cpu(SLI_CTNS_RNN_ID);
@@ -713,18 +938,39 @@ lpfc_ns_cmd(struct lpfc_vport *vport, struct lpfc_nodelist * ndlp, int cmdcode)
713 cmpl = lpfc_cmpl_ct_cmd_rnn_id; 938 cmpl = lpfc_cmpl_ct_cmd_rnn_id;
714 break; 939 break;
715 940
941 case SLI_CTNS_RSPN_ID:
942 CtReq->CommandResponse.bits.CmdRsp =
943 be16_to_cpu(SLI_CTNS_RSPN_ID);
944 CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID);
945 size = sizeof(CtReq->un.rspn.symbname);
946 CtReq->un.rspn.len =
947 lpfc_vport_symbolic_port_name(vport,
948 CtReq->un.rspn.symbname, size);
949 cmpl = lpfc_cmpl_ct_cmd_rspn_id;
950 break;
716 case SLI_CTNS_RSNN_NN: 951 case SLI_CTNS_RSNN_NN:
717 CtReq->CommandResponse.bits.CmdRsp = 952 CtReq->CommandResponse.bits.CmdRsp =
718 be16_to_cpu(SLI_CTNS_RSNN_NN); 953 be16_to_cpu(SLI_CTNS_RSNN_NN);
719 memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, 954 memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
720 sizeof (struct lpfc_name)); 955 sizeof (struct lpfc_name));
721 lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname); 956 size = sizeof(CtReq->un.rsnn.symbname);
722 CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname); 957 CtReq->un.rsnn.len =
958 lpfc_vport_symbolic_node_name(vport,
959 CtReq->un.rsnn.symbname, size);
723 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; 960 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
724 break; 961 break;
962 case SLI_CTNS_RFF_ID:
963 vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED;
964 CtReq->CommandResponse.bits.CmdRsp =
965 be16_to_cpu(SLI_CTNS_RFF_ID);
966 CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);;
967 CtReq->un.rff.fbits = FC4_FEATURE_INIT;
968 CtReq->un.rff.type_code = FC_FCP_DATA;
969 cmpl = lpfc_cmpl_ct_cmd_rff_id;
970 break;
725 } 971 }
726 972
727 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size)) 973 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
728 /* On success, The cmpl function will free the buffers */ 974 /* On success, The cmpl function will free the buffers */
729 return 0; 975 return 0;
730 976
@@ -757,8 +1003,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
757 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 1003 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
758 /* FDMI rsp failed */ 1004 /* FDMI rsp failed */
759 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1005 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
760 "%d:0220 FDMI rsp failed Data: x%x\n", 1006 "%d (%d):0220 FDMI rsp failed Data: x%x\n",
761 phba->brd_no, be16_to_cpu(fdmi_cmd)); 1007 phba->brd_no, vport->vpi,
1008 be16_to_cpu(fdmi_cmd));
762 } 1009 }
763 1010
764 switch (be16_to_cpu(fdmi_cmd)) { 1011 switch (be16_to_cpu(fdmi_cmd)) {
@@ -828,9 +1075,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
828 1075
829 /* FDMI request */ 1076 /* FDMI request */
830 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1077 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
831 "%d:0218 FDMI Request Data: x%x x%x x%x\n", 1078 "%d (%d):0218 FDMI Request Data: x%x x%x x%x\n",
832 phba->brd_no, 1079 phba->brd_no, vport->vpi, vport->fc_flag,
833 vport->fc_flag, vport->port_state, cmdcode); 1080 vport->port_state, cmdcode);
834 1081
835 CtReq = (struct lpfc_sli_ct_request *) mp->virt; 1082 CtReq = (struct lpfc_sli_ct_request *) mp->virt;
836 1083
@@ -1134,15 +1381,15 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1134 } 1381 }
1135 1382
1136 bpl = (struct ulp_bde64 *) bmp->virt; 1383 bpl = (struct ulp_bde64 *) bmp->virt;
1137 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 1384 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
1138 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 1385 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
1139 bpl->tus.f.bdeFlags = 0; 1386 bpl->tus.f.bdeFlags = 0;
1140 bpl->tus.f.bdeSize = size; 1387 bpl->tus.f.bdeSize = size;
1141 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1388 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1142 1389
1143 cmpl = lpfc_cmpl_ct_cmd_fdmi; 1390 cmpl = lpfc_cmpl_ct_cmd_fdmi;
1144 1391
1145 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP)) 1392 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
1146 return 0; 1393 return 0;
1147 1394
1148 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1395 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -1155,8 +1402,8 @@ fdmi_cmd_free_mp:
1155fdmi_cmd_exit: 1402fdmi_cmd_exit:
1156 /* Issue FDMI request failed */ 1403 /* Issue FDMI request failed */
1157 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1404 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1158 "%d:0244 Issue FDMI request failed Data: x%x\n", 1405 "%d (%d):0244 Issue FDMI request failed Data: x%x\n",
1159 phba->brd_no, cmdcode); 1406 phba->brd_no, vport->vpi, cmdcode);
1160 return 1; 1407 return 1;
1161} 1408}
1162 1409
@@ -1170,10 +1417,15 @@ lpfc_fdmi_tmo(unsigned long ptr)
1170 spin_lock_irqsave(&vport->work_port_lock, iflag); 1417 spin_lock_irqsave(&vport->work_port_lock, iflag);
1171 if (!(vport->work_port_events & WORKER_FDMI_TMO)) { 1418 if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
1172 vport->work_port_events |= WORKER_FDMI_TMO; 1419 vport->work_port_events |= WORKER_FDMI_TMO;
1420 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1421
1422 spin_lock_irqsave(&phba->hbalock, iflag);
1173 if (phba->work_wait) 1423 if (phba->work_wait)
1174 wake_up(phba->work_wait); 1424 lpfc_worker_wake_up(phba);
1425 spin_unlock_irqrestore(&phba->hbalock, iflag);
1175 } 1426 }
1176 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 1427 else
1428 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1177} 1429}
1178 1430
1179void 1431void
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 20bace56c8fd..f23fe1e5fbb7 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -36,13 +36,14 @@ enum lpfc_work_type {
36 LPFC_EVT_WARM_START, 36 LPFC_EVT_WARM_START,
37 LPFC_EVT_KILL, 37 LPFC_EVT_KILL,
38 LPFC_EVT_ELS_RETRY, 38 LPFC_EVT_ELS_RETRY,
39 LPFC_EVT_DEV_LOSS,
39}; 40};
40 41
41/* structure used to queue event to the discovery tasklet */ 42/* structure used to queue event to the discovery tasklet */
42struct lpfc_work_evt { 43struct lpfc_work_evt {
43 struct list_head evt_listp; 44 struct list_head evt_listp;
44 void * evt_arg1; 45 void *evt_arg1;
45 void * evt_arg2; 46 void *evt_arg2;
46 enum lpfc_work_type evt; 47 enum lpfc_work_type evt;
47}; 48};
48 49
@@ -73,10 +74,12 @@ struct lpfc_nodelist {
73#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 74#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
74 75
75 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 76 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
77 struct timer_list nlp_initiator_tmr; /* Used with dev_loss */
76 struct fc_rport *rport; /* Corresponding FC transport 78 struct fc_rport *rport; /* Corresponding FC transport
77 port structure */ 79 port structure */
78 struct lpfc_vport *vport; 80 struct lpfc_vport *vport;
79 struct lpfc_work_evt els_retry_evt; 81 struct lpfc_work_evt els_retry_evt;
82 struct lpfc_work_evt dev_loss_evt;
80 unsigned long last_ramp_up_time; /* jiffy of last ramp up */ 83 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
81 unsigned long last_q_full_time; /* jiffy of last queue full */ 84 unsigned long last_q_full_time; /* jiffy of last queue full */
82 struct kref kref; 85 struct kref kref;
@@ -99,6 +102,7 @@ struct lpfc_nodelist {
99#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from 102#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
100 NPR list */ 103 NPR list */
101#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ 104#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
105#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
102 106
103/* There are 4 different double linked lists nodelist entries can reside on. 107/* There are 4 different double linked lists nodelist entries can reside on.
104 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used 108 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d48247b3b654..f60c85d791c7 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -35,9 +35,13 @@
35#include "lpfc.h" 35#include "lpfc.h"
36#include "lpfc_logmsg.h" 36#include "lpfc_logmsg.h"
37#include "lpfc_crtn.h" 37#include "lpfc_crtn.h"
38#include "lpfc_vport.h"
38 39
39static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 40static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
40 struct lpfc_iocbq *); 41 struct lpfc_iocbq *);
42static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
43 struct lpfc_iocbq *);
44
41static int lpfc_max_els_tries = 3; 45static int lpfc_max_els_tries = 3;
42 46
43static int 47static int
@@ -58,10 +62,10 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
58 return 0; 62 return 0;
59 63
60 /* Pending Link Event during Discovery */ 64 /* Pending Link Event during Discovery */
61 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, 65 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
62 "%d:0237 Pending Link Event during " 66 "%d (%d):0237 Pending Link Event during "
63 "Discovery: State x%x\n", 67 "Discovery: State x%x\n",
64 phba->brd_no, phba->pport->port_state); 68 phba->brd_no, vport->vpi, phba->pport->port_state);
65 69
66 /* CLEAR_LA should re-enable link attention events and 70 /* CLEAR_LA should re-enable link attention events and
 67 * we should then immediately take a LATT event. The 71 * we should then immediately take a LATT event. The
@@ -73,12 +77,10 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
73 vport->fc_flag |= FC_ABORT_DISCOVERY; 77 vport->fc_flag |= FC_ABORT_DISCOVERY;
74 spin_unlock_irq(shost->host_lock); 78 spin_unlock_irq(shost->host_lock);
75 79
76 if (phba->link_state != LPFC_CLEAR_LA) { 80 if (phba->link_state != LPFC_CLEAR_LA)
77 lpfc_issue_clear_la(phba, vport); 81 lpfc_issue_clear_la(phba, vport);
78 }
79 82
80 return 1; 83 return 1;
81
82} 84}
83 85
84static struct lpfc_iocbq * 86static struct lpfc_iocbq *
@@ -106,7 +108,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
106 108
107 /* fill in BDEs for command */ 109 /* fill in BDEs for command */
108 /* Allocate buffer for command payload */ 110 /* Allocate buffer for command payload */
109 if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) || 111 if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
110 ((pcmd->virt = lpfc_mbuf_alloc(phba, 112 ((pcmd->virt = lpfc_mbuf_alloc(phba,
111 MEM_PRI, &(pcmd->phys))) == 0)) { 113 MEM_PRI, &(pcmd->phys))) == 0)) {
112 kfree(pcmd); 114 kfree(pcmd);
@@ -119,7 +121,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
119 121
120 /* Allocate buffer for response payload */ 122 /* Allocate buffer for response payload */
121 if (expectRsp) { 123 if (expectRsp) {
122 prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 124 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
123 if (prsp) 125 if (prsp)
124 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 126 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
125 &prsp->phys); 127 &prsp->phys);
@@ -136,7 +138,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
136 } 138 }
137 139
138 /* Allocate buffer for Buffer ptr list */ 140 /* Allocate buffer for Buffer ptr list */
139 pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 141 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
140 if (pbuflist) 142 if (pbuflist)
141 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 143 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
142 &pbuflist->phys); 144 &pbuflist->phys);
@@ -157,18 +159,26 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
157 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL; 159 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
158 icmd->un.elsreq64.remoteID = did; /* DID */ 160 icmd->un.elsreq64.remoteID = did; /* DID */
159 if (expectRsp) { 161 if (expectRsp) {
160 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 162 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
161 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 163 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
162 icmd->ulpTimeout = phba->fc_ratov * 2; 164 icmd->ulpTimeout = phba->fc_ratov * 2;
163 } else { 165 } else {
164 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64); 166 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
165 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 167 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
166 } 168 }
167
168 icmd->ulpBdeCount = 1; 169 icmd->ulpBdeCount = 1;
169 icmd->ulpLe = 1; 170 icmd->ulpLe = 1;
170 icmd->ulpClass = CLASS3; 171 icmd->ulpClass = CLASS3;
171 172
173 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
174 icmd->un.elsreq64.myID = vport->fc_myDID;
175
176 /* For ELS_REQUEST64_CR, use the VPI by default */
177 icmd->ulpContext = vport->vpi;
178 icmd->ulpCt_h = 0;
179 icmd->ulpCt_l = 1;
180 }
181
172 bpl = (struct ulp_bde64 *) pbuflist->virt; 182 bpl = (struct ulp_bde64 *) pbuflist->virt;
173 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); 183 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
174 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); 184 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
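
The NPIV-aware IOCB setup above picks different context/CT bit combinations depending on the command: a plain ELS_REQUEST64_CR carries the VPI (ulpCt_h = 0, ulpCt_l = 1), the FLOGI issued later in this file asks the firmware to assign the NPort ID for VPI 0 (ulpCt_h = 1, ulpCt_l = 0), and GEN_REQUEST64_CR in lpfc_ct.c earlier in the patch keeps using the RPI (both bits 0). A small table-style sketch of that selection; the enum and struct names are invented for the illustration and are not driver types.

    #include <stdio.h>

    /* Illustration only: this just summarizes the ulpCt_h/ulpCt_l settings
     * added for NPIV in this patch, it is not the driver's IOCB layout. */
    enum npiv_cmd { ELS_REQ_BY_VPI, FLOGI_ASSIGN_NPORTID, GEN_REQ_BY_RPI };

    struct ct_bits { int ct_h, ct_l; };

    static struct ct_bits npiv_ct_bits(enum npiv_cmd cmd)
    {
        switch (cmd) {
        case ELS_REQ_BY_VPI:       return (struct ct_bits){ 0, 1 }; /* use VPI */
        case FLOGI_ASSIGN_NPORTID: return (struct ct_bits){ 1, 0 }; /* fw assigns ID */
        case GEN_REQ_BY_RPI:       return (struct ct_bits){ 0, 0 }; /* use RPI */
        }
        return (struct ct_bits){ 0, 0 };
    }

    int main(void)
    {
        struct ct_bits b = npiv_ct_bits(ELS_REQ_BY_VPI);
        printf("ELS request: ulpCt_h=%d ulpCt_l=%d\n", b.ct_h, b.ct_l);
        return 0;
    }
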
@@ -186,7 +196,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
186 } 196 }
187 197
188 /* Save for completion so we can release these resources */ 198 /* Save for completion so we can release these resources */
189 elsiocb->context1 = lpfc_nlp_get(ndlp); 199 if (elscmd != ELS_CMD_LS_RJT)
200 elsiocb->context1 = lpfc_nlp_get(ndlp);
190 elsiocb->context2 = pcmd; 201 elsiocb->context2 = pcmd;
191 elsiocb->context3 = pbuflist; 202 elsiocb->context3 = pbuflist;
192 elsiocb->retry = retry; 203 elsiocb->retry = retry;
@@ -200,16 +211,16 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
200 if (expectRsp) { 211 if (expectRsp) {
201 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 212 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
202 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 213 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
203 "%d:0116 Xmit ELS command x%x to remote " 214 "%d (%d):0116 Xmit ELS command x%x to remote "
204 "NPORT x%x I/O tag: x%x, port state: x%x\n", 215 "NPORT x%x I/O tag: x%x, port state: x%x\n",
205 phba->brd_no, elscmd, did, 216 phba->brd_no, vport->vpi, elscmd, did,
206 elsiocb->iotag, vport->port_state); 217 elsiocb->iotag, vport->port_state);
207 } else { 218 } else {
208 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 219 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
209 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 220 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
210 "%d:0117 Xmit ELS response x%x to remote " 221 "%d (%d):0117 Xmit ELS response x%x to remote "
211 "NPORT x%x I/O tag: x%x, size: x%x\n", 222 "NPORT x%x I/O tag: x%x, size: x%x\n",
212 phba->brd_no, elscmd, 223 phba->brd_no, vport->vpi, elscmd,
213 ndlp->nlp_DID, elsiocb->iotag, cmdSize); 224 ndlp->nlp_DID, elsiocb->iotag, cmdSize);
214 } 225 }
215 226
@@ -218,15 +229,76 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
218 229
219 230
220static int 231static int
221lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 232lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
222 struct serv_parm *sp, IOCB_t *irsp)
223{ 233{
224 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
225 struct lpfc_hba *phba = vport->phba; 234 struct lpfc_hba *phba = vport->phba;
226 LPFC_MBOXQ_t *mbox; 235 LPFC_MBOXQ_t *mbox;
227 struct lpfc_dmabuf *mp; 236 struct lpfc_dmabuf *mp;
237 struct lpfc_nodelist *ndlp;
238 struct serv_parm *sp;
228 int rc; 239 int rc;
229 240
241 sp = &phba->fc_fabparam;
242 ndlp = lpfc_findnode_did(vport, Fabric_DID);
243 if (!ndlp)
244 goto fail;
245
246 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
247 if (!mbox)
248 goto fail;
249
250 vport->port_state = LPFC_FABRIC_CFG_LINK;
251 lpfc_config_link(phba, mbox);
252 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
253 mbox->vport = vport;
254
255 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
256 if (rc == MBX_NOT_FINISHED)
257 goto fail_free_mbox;
258
259 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
260 if (!mbox)
261 goto fail;
262 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
263 0);
264 if (rc)
265 goto fail_free_mbox;
266
267 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
268 mbox->vport = vport;
269 mbox->context2 = lpfc_nlp_get(ndlp);
270
271 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
272 if (rc == MBX_NOT_FINISHED)
273 goto fail_issue_reg_login;
274
275 return 0;
276
277fail_issue_reg_login:
278 lpfc_nlp_put(ndlp);
279 mp = (struct lpfc_dmabuf *) mbox->context1;
280 lpfc_mbuf_free(phba, mp->virt, mp->phys);
281 kfree(mp);
282fail_free_mbox:
283 mempool_free(mbox, phba->mbox_mem_pool);
284
285fail:
286 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
287 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
288 "%d (%d):0249 Cannot issue Register Fabric login\n",
289 phba->brd_no, vport->vpi);
290 return -ENXIO;
291}
292
293static int
294lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
295 struct serv_parm *sp, IOCB_t *irsp)
296{
297 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
298 struct lpfc_hba *phba = vport->phba;
299 struct lpfc_nodelist *np;
300 struct lpfc_nodelist *next_np;
301
230 spin_lock_irq(shost->host_lock); 302 spin_lock_irq(shost->host_lock);
231 vport->fc_flag |= FC_FABRIC; 303 vport->fc_flag |= FC_FABRIC;
232 spin_unlock_irq(shost->host_lock); 304 spin_unlock_irq(shost->host_lock);
@@ -251,7 +323,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
251 323
252 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 324 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
253 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 325 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
254 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 326 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
255 ndlp->nlp_class_sup = 0; 327 ndlp->nlp_class_sup = 0;
256 if (sp->cls1.classValid) 328 if (sp->cls1.classValid)
257 ndlp->nlp_class_sup |= FC_COS_CLASS1; 329 ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -265,47 +337,59 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
265 sp->cmn.bbRcvSizeLsb; 337 sp->cmn.bbRcvSizeLsb;
266 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 338 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
267 339
268 ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID; 340 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
269 341 if (sp->cmn.response_multiple_NPort) {
270 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 342 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
271 if (!mbox) 343 "%d:1816 FLOGI NPIV supported, "
272 goto fail; 344 "response data 0x%x\n",
345 phba->brd_no,
346 sp->cmn.response_multiple_NPort);
347 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
273 348
274 vport->port_state = LPFC_FABRIC_CFG_LINK; 349 } else {
275 lpfc_config_link(phba, mbox); 350 /* Because we asked f/w for NPIV it still expects us
 276 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 351 to call reg_vnpid at least for the physical host */
277 mbox->vport = vport; 352 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
278 353 "%d:1817 Fabric does not support NPIV "
279 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 354 "- configuring single port mode.\n",
280 if (rc == MBX_NOT_FINISHED) 355 phba->brd_no);
281 goto fail_free_mbox; 356 phba->vpi_cnt = 1;
357 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
358 }
359 }
282 360
283 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 361 if ((vport->fc_prevDID != vport->fc_myDID) &&
284 if (!mbox) 362 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
285 goto fail;
286 rc = lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0);
287 if (rc)
288 goto fail_free_mbox;
289 363
290 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 364 /* If our NportID changed, we need to ensure all
291 mbox->vport = vport; 365 * remaining NPORTs get unreg_login'ed.
292 mbox->context2 = lpfc_nlp_get(ndlp); 366 */
367 list_for_each_entry_safe(np, next_np,
368 &vport->fc_nodes, nlp_listp) {
369 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
370 !(np->nlp_flag & NLP_NPR_ADISC))
371 continue;
372 spin_lock_irq(shost->host_lock);
373 np->nlp_flag &= ~NLP_NPR_ADISC;
374 spin_unlock_irq(shost->host_lock);
375 lpfc_unreg_rpi(vport, np);
376 }
377 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
378 lpfc_mbx_unreg_vpi(vport);
379 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
380 }
381 }
293 382
294 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 383 ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
295 if (rc == MBX_NOT_FINISHED) 384 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
296 goto fail_issue_reg_login;
297 385
386 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
387 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
388 lpfc_register_new_vport(phba, vport, ndlp);
389 return 0;
390 }
391 lpfc_issue_fabric_reglogin(vport);
298 return 0; 392 return 0;
299
300 fail_issue_reg_login:
301 lpfc_nlp_put(ndlp);
302 mp = (struct lpfc_dmabuf *) mbox->context1;
303 lpfc_mbuf_free(phba, mp->virt, mp->phys);
304 kfree(mp);
305 fail_free_mbox:
306 mempool_free(mbox, phba->mbox_mem_pool);
307 fail:
308 return -ENXIO;
309} 393}
310 394
311/* 395/*
@@ -322,12 +406,13 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
322 406
323 spin_lock_irq(shost->host_lock); 407 spin_lock_irq(shost->host_lock);
324 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 408 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
409 phba->vpi_cnt = 1;
325 spin_unlock_irq(shost->host_lock); 410 spin_unlock_irq(shost->host_lock);
326 411
327 phba->fc_edtov = FF_DEF_EDTOV; 412 phba->fc_edtov = FF_DEF_EDTOV;
328 phba->fc_ratov = FF_DEF_RATOV; 413 phba->fc_ratov = FF_DEF_RATOV;
329 rc = memcmp(&vport->fc_portname, &sp->portName, 414 rc = memcmp(&vport->fc_portname, &sp->portName,
330 sizeof(struct lpfc_name)); 415 sizeof(vport->fc_portname));
331 if (rc >= 0) { 416 if (rc >= 0) {
332 /* This side will initiate the PLOGI */ 417 /* This side will initiate the PLOGI */
333 spin_lock_irq(shost->host_lock); 418 spin_lock_irq(shost->host_lock);
@@ -352,7 +437,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
352 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 437 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
353 mbox->vport = vport; 438 mbox->vport = vport;
354 rc = lpfc_sli_issue_mbox(phba, mbox, 439 rc = lpfc_sli_issue_mbox(phba, mbox,
355 MBX_NOWAIT | MBX_STOP_IOCB); 440 MBX_NOWAIT | MBX_STOP_IOCB);
356 if (rc == MBX_NOT_FINISHED) { 441 if (rc == MBX_NOT_FINISHED) {
357 mempool_free(mbox, phba->mbox_mem_pool); 442 mempool_free(mbox, phba->mbox_mem_pool);
358 goto fail; 443 goto fail;
@@ -392,7 +477,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
392 /* Start discovery - this should just do CLEAR_LA */ 477 /* Start discovery - this should just do CLEAR_LA */
393 lpfc_disc_start(vport); 478 lpfc_disc_start(vport);
394 return 0; 479 return 0;
395 fail: 480fail:
396 return -ENXIO; 481 return -ENXIO;
397} 482}
398 483
@@ -422,6 +507,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
422 /* FLOGI failed, so there is no fabric */ 507 /* FLOGI failed, so there is no fabric */
423 spin_lock_irq(shost->host_lock); 508 spin_lock_irq(shost->host_lock);
424 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 509 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
510 phba->vpi_cnt = 1;
425 spin_unlock_irq(shost->host_lock); 511 spin_unlock_irq(shost->host_lock);
426 512
427 /* If private loop, then allow max outstanding els to be 513 /* If private loop, then allow max outstanding els to be
@@ -433,11 +519,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
433 } 519 }
434 520
435 /* FLOGI failure */ 521 /* FLOGI failure */
436 lpfc_printf_log(phba, 522 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
437 KERN_INFO, 523 "%d (%d):0100 FLOGI failure Data: x%x x%x "
438 LOG_ELS, 524 "x%x\n",
439 "%d:0100 FLOGI failure Data: x%x x%x x%x\n", 525 phba->brd_no, vport->vpi,
440 phba->brd_no,
441 irsp->ulpStatus, irsp->un.ulpWord[4], 526 irsp->ulpStatus, irsp->un.ulpWord[4],
442 irsp->ulpTimeout); 527 irsp->ulpTimeout);
443 goto flogifail; 528 goto flogifail;
@@ -453,9 +538,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
453 538
454 /* FLOGI completes successfully */ 539 /* FLOGI completes successfully */
455 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 540 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
456 "%d:0101 FLOGI completes sucessfully " 541 "%d (%d):0101 FLOGI completes sucessfully "
457 "Data: x%x x%x x%x x%x\n", 542 "Data: x%x x%x x%x x%x\n",
458 phba->brd_no, 543 phba->brd_no, vport->vpi,
459 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 544 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
460 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 545 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
461 546
@@ -475,6 +560,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
475 560
476flogifail: 561flogifail:
477 lpfc_nlp_put(ndlp); 562 lpfc_nlp_put(ndlp);
563 phba->vpi_cnt = 1;
478 564
479 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || 565 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
480 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED && 566 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
@@ -506,9 +592,10 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
506 592
507 pring = &phba->sli.ring[LPFC_ELS_RING]; 593 pring = &phba->sli.ring[LPFC_ELS_RING];
508 594
509 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); 595 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
510 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 596 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
511 ndlp->nlp_DID, ELS_CMD_FLOGI); 597 ndlp->nlp_DID, ELS_CMD_FLOGI);
598
512 if (!elsiocb) 599 if (!elsiocb)
513 return 1; 600 return 1;
514 601
@@ -517,8 +604,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
517 604
518 /* For FLOGI request, remainder of payload is service parameters */ 605 /* For FLOGI request, remainder of payload is service parameters */
519 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 606 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
520 pcmd += sizeof (uint32_t); 607 pcmd += sizeof(uint32_t);
521 memcpy(pcmd, &vport->fc_sparam, sizeof (struct serv_parm)); 608 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
522 sp = (struct serv_parm *) pcmd; 609 sp = (struct serv_parm *) pcmd;
523 610
524 /* Setup CSPs accordingly for Fabric */ 611 /* Setup CSPs accordingly for Fabric */
@@ -532,6 +619,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
532 if (sp->cmn.fcphHigh < FC_PH3) 619 if (sp->cmn.fcphHigh < FC_PH3)
533 sp->cmn.fcphHigh = FC_PH3; 620 sp->cmn.fcphHigh = FC_PH3;
534 621
622 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
623 sp->cmn.request_multiple_Nport = 1;
624
625 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
626 icmd->ulpCt_h = 1;
627 icmd->ulpCt_l = 0;
628 }
629
535 tmo = phba->fc_ratov; 630 tmo = phba->fc_ratov;
536 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 631 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
537 lpfc_set_disctmo(vport); 632 lpfc_set_disctmo(vport);
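When NPIV is enabled, the FLOGI payload and its IOCB are marked so the fabric can later grant additional N_Port IDs. The sketch below restates that branch with the intent of each field spelled out; the field names are the ones used in the hunk above, and the reading of the ulpCt bits follows the in-line comment (it is otherwise an assumption).

	/* Sketch: NPIV-aware FLOGI setup, restating the hunk above with comments. */
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		/* Common service parameter bit: ask the fabric to grant
		 * additional N_Port IDs (one per virtual port) via FDISC later. */
		sp->cmn.request_multiple_Nport = 1;

		/* Context-tag bits <ulpCt_h, ulpCt_l> = <1, 0>: per the comment
		 * in the patch, let the FLOGI LS_ACC assign the N_Port ID for VPI 0. */
		icmd->ulpCt_h = 1;
		icmd->ulpCt_l = 0;
	}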
@@ -539,7 +634,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
539 634
540 phba->fc_stat.elsXmitFLOGI++; 635 phba->fc_stat.elsXmitFLOGI++;
541 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; 636 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
542 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 637 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
543 if (rc == IOCB_ERROR) { 638 if (rc == IOCB_ERROR) {
544 lpfc_els_free_iocb(phba, elsiocb); 639 lpfc_els_free_iocb(phba, elsiocb);
545 return 1; 640 return 1;
@@ -572,8 +667,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
572 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR && 667 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
573 icmd->un.elsreq64.bdl.ulpIoTag32) { 668 icmd->un.elsreq64.bdl.ulpIoTag32) {
574 ndlp = (struct lpfc_nodelist *)(iocb->context1); 669 ndlp = (struct lpfc_nodelist *)(iocb->context1);
575 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) 670 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
576 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 671 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
672 }
577 } 673 }
578 } 674 }
579 spin_unlock_irq(&phba->hbalock); 675 spin_unlock_irq(&phba->hbalock);
@@ -604,6 +700,28 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
604 return 1; 700 return 1;
605} 701}
606 702
703int
704lpfc_initial_fdisc(struct lpfc_vport *vport)
705{
706 struct lpfc_hba *phba = vport->phba;
707 struct lpfc_nodelist *ndlp;
708
709 /* First look for the Fabric ndlp */
710 ndlp = lpfc_findnode_did(vport, Fabric_DID);
711 if (!ndlp) {
712 /* Cannot find existing Fabric ndlp, so allocate a new one */
713 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
714 if (!ndlp)
715 return 0;
716 lpfc_nlp_init(vport, ndlp, Fabric_DID);
717 } else {
718 lpfc_dequeue_node(vport, ndlp);
719 }
720 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
721 lpfc_nlp_put(ndlp);
722 }
723 return 1;
724}
607static void 725static void
608lpfc_more_plogi(struct lpfc_vport *vport) 726lpfc_more_plogi(struct lpfc_vport *vport)
609{ 727{
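lpfc_initial_fdisc() is the vport counterpart of lpfc_initial_flogi(): it reuses (or allocates) the Fabric_DID node and then issues an FDISC. A hedged sketch of how a caller might choose between the two follows; the caller itself, the LPFC_NPIV_PORT port type and the LPFC_FDISC state are assumptions made for illustration and do not appear in this hunk.

	/* Illustrative caller only; names flagged above are assumptions. */
	static void start_fabric_login_sketch(struct lpfc_vport *vport)
	{
		if (vport->port_type == LPFC_PHYSICAL_PORT) {
			/* Physical port: acquire the fabric login with FLOGI. */
			lpfc_initial_flogi(vport);
		} else {
			/* NPIV virtual port: request an additional N_Port ID. */
			vport->port_state = LPFC_FDISC;
			lpfc_initial_fdisc(vport);
		}
	}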
@@ -615,9 +733,9 @@ lpfc_more_plogi(struct lpfc_vport *vport)
615 733
616 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 734 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
617 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 735 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
618 "%d:0232 Continue discovery with %d PLOGIs to go " 736 "%d (%d):0232 Continue discovery with %d PLOGIs to go "
619 "Data: x%x x%x x%x\n", 737 "Data: x%x x%x x%x\n",
620 phba->brd_no, vport->num_disc_nodes, 738 phba->brd_no, vport->vpi, vport->num_disc_nodes,
621 vport->fc_plogi_cnt, vport->fc_flag, vport->port_state); 739 vport->fc_plogi_cnt, vport->fc_flag, vport->port_state);
622 740
623 /* Check to see if there are more PLOGIs to be sent */ 741 /* Check to see if there are more PLOGIs to be sent */
@@ -629,14 +747,13 @@ lpfc_more_plogi(struct lpfc_vport *vport)
629} 747}
630 748
631static struct lpfc_nodelist * 749static struct lpfc_nodelist *
632lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp, 750lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
633 struct lpfc_nodelist *ndlp) 751 struct lpfc_nodelist *ndlp)
634{ 752{
635 struct lpfc_vport *vport = ndlp->vport; 753 struct lpfc_vport *vport = ndlp->vport;
636 struct lpfc_nodelist *new_ndlp; 754 struct lpfc_nodelist *new_ndlp;
637 uint32_t *lp;
638 struct serv_parm *sp; 755 struct serv_parm *sp;
639 uint8_t name[sizeof (struct lpfc_name)]; 756 uint8_t name[sizeof(struct lpfc_name)];
640 uint32_t rc; 757 uint32_t rc;
641 758
642 /* Fabric nodes can have the same WWPN so we don't bother searching 759 /* Fabric nodes can have the same WWPN so we don't bother searching
@@ -645,8 +762,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
645 if (ndlp->nlp_type & NLP_FABRIC) 762 if (ndlp->nlp_type & NLP_FABRIC)
646 return ndlp; 763 return ndlp;
647 764
648 lp = (uint32_t *) prsp->virt; 765 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
649 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
650 memset(name, 0, sizeof(struct lpfc_name)); 766 memset(name, 0, sizeof(struct lpfc_name));
651 767
652 /* Now we find out if the NPort we are logging into, matches the WWPN 768 /* Now we find out if the NPort we are logging into, matches the WWPN
@@ -701,8 +817,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
701 817
702 irsp = &rspiocb->iocb; 818 irsp = &rspiocb->iocb;
703 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 819 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
704
705 if (!ndlp) { 820 if (!ndlp) {
821 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
822 "%d (%d):0136 PLOGI completes to NPort x%x "
823 "with no ndlp. Data: x%x x%x x%x\n",
824 phba->brd_no, vport->vpi, irsp->un.elsreq64.remoteID,
825 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpIoTag);
706 goto out; 826 goto out;
707 } 827 }
708 828
@@ -717,11 +837,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
717 837
718 /* PLOGI completes to NPort <nlp_DID> */ 838 /* PLOGI completes to NPort <nlp_DID> */
719 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 839 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
720 "%d:0102 PLOGI completes to NPort x%x " 840 "%d (%d):0102 PLOGI completes to NPort x%x "
721 "Data: x%x x%x x%x x%x x%x\n", 841 "Data: x%x x%x x%x x%x x%x\n",
722 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 842 phba->brd_no, vport->vpi, ndlp->nlp_DID,
723 irsp->un.ulpWord[4], irsp->ulpTimeout, disc, 843 irsp->ulpStatus, irsp->un.ulpWord[4],
724 vport->num_disc_nodes); 844 irsp->ulpTimeout, disc, vport->num_disc_nodes);
725 845
726 /* Check to see if link went down during discovery */ 846 /* Check to see if link went down during discovery */
727 if (lpfc_els_chk_latt(vport)) { 847 if (lpfc_els_chk_latt(vport)) {
@@ -748,24 +868,33 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
748 } 868 }
749 869
750 /* PLOGI failed */ 870 /* PLOGI failed */
871 if (ndlp->nlp_DID == NameServer_DID) {
872 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
873 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
874 "%d (%d):0250 Nameserver login error: "
875 "0x%x / 0x%x\n",
876 phba->brd_no, vport->vpi,
877 irsp->ulpStatus, irsp->un.ulpWord[4]);
878 }
879
751 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 880 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
752 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 881 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
753 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || 882 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
754 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || 883 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
755 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) { 884 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
756 rc = NLP_STE_FREED_NODE; 885 rc = NLP_STE_FREED_NODE;
757 } else { 886 } else {
758 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 887 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
759 NLP_EVT_CMPL_PLOGI); 888 NLP_EVT_CMPL_PLOGI);
760 } 889 }
761 } else { 890 } else {
762 /* Good status, call state machine */ 891 /* Good status, call state machine */
763 prsp = list_entry(((struct lpfc_dmabuf *) 892 prsp = list_entry(((struct lpfc_dmabuf *)
764 cmdiocb->context2)->list.next, 893 cmdiocb->context2)->list.next,
765 struct lpfc_dmabuf, list); 894 struct lpfc_dmabuf, list);
766 ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp); 895 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
767 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 896 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
768 NLP_EVT_CMPL_PLOGI); 897 NLP_EVT_CMPL_PLOGI);
769 } 898 }
770 899
771 if (disc && vport->num_disc_nodes) { 900 if (disc && vport->num_disc_nodes) {
@@ -811,11 +940,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
811 struct lpfc_sli *psli; 940 struct lpfc_sli *psli;
812 uint8_t *pcmd; 941 uint8_t *pcmd;
813 uint16_t cmdsize; 942 uint16_t cmdsize;
943 int ret;
814 944
815 psli = &phba->sli; 945 psli = &phba->sli;
816 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 946 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
817 947
818 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); 948 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
819 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did, 949 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did,
820 ELS_CMD_PLOGI); 950 ELS_CMD_PLOGI);
821 if (!elsiocb) 951 if (!elsiocb)
@@ -826,8 +956,8 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
826 956
827 /* For PLOGI request, remainder of payload is service parameters */ 957 /* For PLOGI request, remainder of payload is service parameters */
828 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 958 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
829 pcmd += sizeof (uint32_t); 959 pcmd += sizeof(uint32_t);
830 memcpy(pcmd, &vport->fc_sparam, sizeof (struct serv_parm)); 960 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
831 sp = (struct serv_parm *) pcmd; 961 sp = (struct serv_parm *) pcmd;
832 962
833 if (sp->cmn.fcphLow < FC_PH_4_3) 963 if (sp->cmn.fcphLow < FC_PH_4_3)
@@ -838,7 +968,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
838 968
839 phba->fc_stat.elsXmitPLOGI++; 969 phba->fc_stat.elsXmitPLOGI++;
840 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 970 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
841 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 971 ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
972
973 if (ret == IOCB_ERROR) {
842 lpfc_els_free_iocb(phba, elsiocb); 974 lpfc_els_free_iocb(phba, elsiocb);
843 return 1; 975 return 1;
844 } 976 }
@@ -867,10 +999,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
867 999
868 /* PRLI completes to NPort <nlp_DID> */ 1000 /* PRLI completes to NPort <nlp_DID> */
869 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1001 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
870 "%d:0103 PRLI completes to NPort x%x " 1002 "%d (%d):0103 PRLI completes to NPort x%x "
871 "Data: x%x x%x x%x x%x\n", 1003 "Data: x%x x%x x%x x%x\n",
872 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 1004 phba->brd_no, vport->vpi, ndlp->nlp_DID,
873 irsp->un.ulpWord[4], irsp->ulpTimeout, 1005 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
874 vport->num_disc_nodes); 1006 vport->num_disc_nodes);
875 1007
876 vport->fc_prli_sent--; 1008 vport->fc_prli_sent--;
@@ -887,18 +1019,18 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
887 /* PRLI failed */ 1019 /* PRLI failed */
888 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1020 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
889 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1021 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
890 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || 1022 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
891 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || 1023 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
892 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) { 1024 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
893 goto out; 1025 goto out;
894 } else { 1026 } else {
895 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1027 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
896 NLP_EVT_CMPL_PRLI); 1028 NLP_EVT_CMPL_PRLI);
897 } 1029 }
898 } else { 1030 } else {
899 /* Good status, call state machine */ 1031 /* Good status, call state machine */
900 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1032 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
901 NLP_EVT_CMPL_PRLI); 1033 NLP_EVT_CMPL_PRLI);
902 } 1034 }
903 1035
904out: 1036out:
@@ -923,7 +1055,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
923 psli = &phba->sli; 1055 psli = &phba->sli;
924 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1056 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
925 1057
926 cmdsize = (sizeof (uint32_t) + sizeof (PRLI)); 1058 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
927 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1059 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
928 ndlp->nlp_DID, ELS_CMD_PRLI); 1060 ndlp->nlp_DID, ELS_CMD_PRLI);
929 if (!elsiocb) 1061 if (!elsiocb)
@@ -933,9 +1065,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
933 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1065 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
934 1066
935 /* For PRLI request, remainder of payload is service parameters */ 1067 /* For PRLI request, remainder of payload is service parameters */
936 memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t))); 1068 memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
937 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI; 1069 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
938 pcmd += sizeof (uint32_t); 1070 pcmd += sizeof(uint32_t);
939 1071
940 /* For PRLI, remainder of payload is PRLI parameter page */ 1072 /* For PRLI, remainder of payload is PRLI parameter page */
941 npr = (PRLI *) pcmd; 1073 npr = (PRLI *) pcmd;
@@ -982,9 +1114,9 @@ lpfc_more_adisc(struct lpfc_vport *vport)
982 1114
983 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 1115 /* Continue discovery with <num_disc_nodes> ADISCs to go */
984 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1116 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
985 "%d:0210 Continue discovery with %d ADISCs to go " 1117 "%d (%d):0210 Continue discovery with %d ADISCs to go "
986 "Data: x%x x%x x%x\n", 1118 "Data: x%x x%x x%x\n",
987 phba->brd_no, vport->num_disc_nodes, 1119 phba->brd_no, vport->vpi, vport->num_disc_nodes,
988 vport->fc_adisc_cnt, vport->fc_flag, vport->port_state); 1120 vport->fc_adisc_cnt, vport->fc_flag, vport->port_state);
989 1121
990 /* Check to see if there are more ADISCs to be sent */ 1122 /* Check to see if there are more ADISCs to be sent */
@@ -1048,11 +1180,11 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1048 1180
1049 /* ADISC completes to NPort <nlp_DID> */ 1181 /* ADISC completes to NPort <nlp_DID> */
1050 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1182 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1051 "%d:0104 ADISC completes to NPort x%x " 1183 "%d (%d):0104 ADISC completes to NPort x%x "
1052 "Data: x%x x%x x%x x%x x%x\n", 1184 "Data: x%x x%x x%x x%x x%x\n",
1053 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 1185 phba->brd_no, vport->vpi, ndlp->nlp_DID,
1054 irsp->un.ulpWord[4], irsp->ulpTimeout, disc, 1186 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
1055 vport->num_disc_nodes); 1187 disc, vport->num_disc_nodes);
1056 1188
1057 /* Check to see if link went down during discovery */ 1189 /* Check to see if link went down during discovery */
1058 if (lpfc_els_chk_latt(vport)) { 1190 if (lpfc_els_chk_latt(vport)) {
@@ -1095,12 +1227,41 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1095 1227
1096 /* Check to see if we are done with ADISC authentication */ 1228 /* Check to see if we are done with ADISC authentication */
1097 if (vport->num_disc_nodes == 0) { 1229 if (vport->num_disc_nodes == 0) {
1098 lpfc_can_disctmo(vport); 1230 /* If we get here, there is nothing left to ADISC */
1099 /* If we get here, there is nothing left to wait for */ 1231 /*
1100 if (vport->port_state < LPFC_VPORT_READY && 1232 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1101 phba->link_state != LPFC_CLEAR_LA) { 1233 * and continue discovery.
1234 */
1235 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1236 !(vport->fc_flag & FC_RSCN_MODE)) {
1237 lpfc_issue_reg_vpi(phba, vport);
1238 goto out;
1239 }
1240 /*
1241 * For SLI2, we need to set port_state to READY
1242 * and continue discovery.
1243 */
1244 if (vport->port_state < LPFC_VPORT_READY) {
1245 /* If we get here, there is nothing to ADISC */
1102 if (vport->port_type == LPFC_PHYSICAL_PORT) 1246 if (vport->port_type == LPFC_PHYSICAL_PORT)
1103 lpfc_issue_clear_la(phba, vport); 1247 lpfc_issue_clear_la(phba, vport);
1248
1249 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1250 vport->num_disc_nodes = 0;
1251 /* go thru NPR list, issue ELS PLOGIs */
1252 if (vport->fc_npr_cnt)
1253 lpfc_els_disc_plogi(vport);
1254
1255 if (!vport->num_disc_nodes) {
1256 spin_lock_irq(shost->host_lock);
1257 vport->fc_flag &=
1258 ~FC_NDISC_ACTIVE;
1259 spin_unlock_irq(
1260 shost->host_lock);
1261 lpfc_can_disctmo(vport);
1262 }
1263 }
1264 vport->port_state = LPFC_VPORT_READY;
1104 } else { 1265 } else {
1105 lpfc_rscn_disc(vport); 1266 lpfc_rscn_disc(vport);
1106 } 1267 }
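The rewritten ADISC-completion path above chooses one of three continuations once num_disc_nodes reaches zero. The sketch below condenses that decision; helper and flag names are taken from the hunk, and the comments paraphrase its in-line comments.

	/* Condensed decision tree for "last ADISC finished" (names from the hunk). */
	if (vport->num_disc_nodes == 0) {
		if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		    !(vport->fc_flag & FC_RSCN_MODE)) {
			/* NPIV, not in RSCN recovery: the REG_VPI completion will
			 * set port_state to READY and continue discovery. */
			lpfc_issue_reg_vpi(phba, vport);
		} else if (vport->port_state < LPFC_VPORT_READY) {
			/* SLI-2 style path: CLEAR_LA on the physical port, PLOGI
			 * any NPR nodes (as in the hunk), then mark the port READY. */
			if (vport->port_type == LPFC_PHYSICAL_PORT)
				lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		} else {
			/* Already READY: this was RSCN-driven, resume RSCN discovery. */
			lpfc_rscn_disc(vport);
		}
	}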
@@ -1125,7 +1286,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1125 uint8_t *pcmd; 1286 uint8_t *pcmd;
1126 uint16_t cmdsize; 1287 uint16_t cmdsize;
1127 1288
1128 cmdsize = (sizeof (uint32_t) + sizeof (ADISC)); 1289 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
1129 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1290 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1130 ndlp->nlp_DID, ELS_CMD_ADISC); 1291 ndlp->nlp_DID, ELS_CMD_ADISC);
1131 if (!elsiocb) 1292 if (!elsiocb)
@@ -1136,13 +1297,13 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1136 1297
1137 /* For ADISC request, remainder of payload is service parameters */ 1298 /* For ADISC request, remainder of payload is service parameters */
1138 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 1299 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1139 pcmd += sizeof (uint32_t); 1300 pcmd += sizeof(uint32_t);
1140 1301
1141 /* Fill in ADISC payload */ 1302 /* Fill in ADISC payload */
1142 ap = (ADISC *) pcmd; 1303 ap = (ADISC *) pcmd;
1143 ap->hardAL_PA = phba->fc_pref_ALPA; 1304 ap->hardAL_PA = phba->fc_pref_ALPA;
1144 memcpy(&ap->portName, &vport->fc_portname, sizeof (struct lpfc_name)); 1305 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
1145 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof (struct lpfc_name)); 1306 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1146 ap->DID = be32_to_cpu(vport->fc_myDID); 1307 ap->DID = be32_to_cpu(vport->fc_myDID);
1147 1308
1148 phba->fc_stat.elsXmitADISC++; 1309 phba->fc_stat.elsXmitADISC++;
@@ -1181,16 +1342,25 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1181 1342
1182 /* LOGO completes to NPort <nlp_DID> */ 1343 /* LOGO completes to NPort <nlp_DID> */
1183 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1344 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1184 "%d:0105 LOGO completes to NPort x%x " 1345 "%d (%d):0105 LOGO completes to NPort x%x "
1185 "Data: x%x x%x x%x x%x\n", 1346 "Data: x%x x%x x%x x%x\n",
1186 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus, 1347 phba->brd_no, vport->vpi, ndlp->nlp_DID,
1187 irsp->un.ulpWord[4], irsp->ulpTimeout, 1348 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
1188 vport->num_disc_nodes); 1349 vport->num_disc_nodes);
1189 1350
1190 /* Check to see if link went down during discovery */ 1351 /* Check to see if link went down during discovery */
1191 if (lpfc_els_chk_latt(vport)) 1352 if (lpfc_els_chk_latt(vport))
1192 goto out; 1353 goto out;
1193 1354
1355 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
1356 /* NLP_EVT_DEVICE_RM should unregister the RPI
1357 * which should abort all outstanding IOs.
1358 */
1359 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1360 NLP_EVT_DEVICE_RM);
1361 goto out;
1362 }
1363
1194 if (irsp->ulpStatus) { 1364 if (irsp->ulpStatus) {
1195 /* Check for retry */ 1365 /* Check for retry */
1196 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1366 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
@@ -1199,20 +1369,20 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1199 /* LOGO failed */ 1369 /* LOGO failed */
1200 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1370 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1201 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1371 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1202 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || 1372 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1203 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || 1373 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1204 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) { 1374 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1205 goto out; 1375 goto out;
1206 } else { 1376 } else {
1207 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1377 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1208 NLP_EVT_CMPL_LOGO); 1378 NLP_EVT_CMPL_LOGO);
1209 } 1379 }
1210 } else { 1380 } else {
1211 /* Good status, call state machine. 1381 /* Good status, call state machine.
1212 * This will unregister the rpi if needed. 1382 * This will unregister the rpi if needed.
1213 */ 1383 */
1214 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1384 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1215 NLP_EVT_CMPL_LOGO); 1385 NLP_EVT_CMPL_LOGO);
1216 } 1386 }
1217 1387
1218out: 1388out:
@@ -1232,11 +1402,12 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1232 struct lpfc_sli *psli; 1402 struct lpfc_sli *psli;
1233 uint8_t *pcmd; 1403 uint8_t *pcmd;
1234 uint16_t cmdsize; 1404 uint16_t cmdsize;
1405 int rc;
1235 1406
1236 psli = &phba->sli; 1407 psli = &phba->sli;
1237 pring = &psli->ring[LPFC_ELS_RING]; 1408 pring = &psli->ring[LPFC_ELS_RING];
1238 1409
1239 cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name); 1410 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
1240 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1411 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1241 ndlp->nlp_DID, ELS_CMD_LOGO); 1412 ndlp->nlp_DID, ELS_CMD_LOGO);
1242 if (!elsiocb) 1413 if (!elsiocb)
@@ -1245,19 +1416,21 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1245 icmd = &elsiocb->iocb; 1416 icmd = &elsiocb->iocb;
1246 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1417 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1247 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 1418 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1248 pcmd += sizeof (uint32_t); 1419 pcmd += sizeof(uint32_t);
1249 1420
1250 /* Fill in LOGO payload */ 1421 /* Fill in LOGO payload */
1251 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 1422 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
1252 pcmd += sizeof (uint32_t); 1423 pcmd += sizeof(uint32_t);
1253 memcpy(pcmd, &vport->fc_portname, sizeof (struct lpfc_name)); 1424 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
1254 1425
1255 phba->fc_stat.elsXmitLOGO++; 1426 phba->fc_stat.elsXmitLOGO++;
1256 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 1427 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1257 spin_lock_irq(shost->host_lock); 1428 spin_lock_irq(shost->host_lock);
1258 ndlp->nlp_flag |= NLP_LOGO_SND; 1429 ndlp->nlp_flag |= NLP_LOGO_SND;
1259 spin_unlock_irq(shost->host_lock); 1430 spin_unlock_irq(shost->host_lock);
1260 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { 1431 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
1432
1433 if (rc == IOCB_ERROR) {
1261 spin_lock_irq(shost->host_lock); 1434 spin_lock_irq(shost->host_lock);
1262 ndlp->nlp_flag &= ~NLP_LOGO_SND; 1435 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1263 spin_unlock_irq(shost->host_lock); 1436 spin_unlock_irq(shost->host_lock);
@@ -1277,11 +1450,10 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1277 irsp = &rspiocb->iocb; 1450 irsp = &rspiocb->iocb;
1278 1451
1279 /* ELS cmd tag <ulpIoTag> completes */ 1452 /* ELS cmd tag <ulpIoTag> completes */
1280 lpfc_printf_log(phba, 1453 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1281 KERN_INFO, 1454 "%d (%d):0106 ELS cmd tag x%x completes Data: x%x x%x "
1282 LOG_ELS, 1455 "x%x\n",
1283 "%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 1456 phba->brd_no, vport->vpi,
1284 phba->brd_no,
1285 irsp->ulpIoTag, irsp->ulpStatus, 1457 irsp->ulpIoTag, irsp->ulpStatus,
1286 irsp->un.ulpWord[4], irsp->ulpTimeout); 1458 irsp->un.ulpWord[4], irsp->ulpTimeout);
1287 1459
@@ -1305,7 +1477,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1305 1477
1306 psli = &phba->sli; 1478 psli = &phba->sli;
1307 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1479 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1308 cmdsize = (sizeof (uint32_t) + sizeof (SCR)); 1480 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
1309 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1481 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1310 if (!ndlp) 1482 if (!ndlp)
1311 return 1; 1483 return 1;
@@ -1324,10 +1496,10 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1324 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1496 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1325 1497
1326 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 1498 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1327 pcmd += sizeof (uint32_t); 1499 pcmd += sizeof(uint32_t);
1328 1500
1329 /* For SCR, remainder of payload is SCR parameter page */ 1501 /* For SCR, remainder of payload is SCR parameter page */
1330 memset(pcmd, 0, sizeof (SCR)); 1502 memset(pcmd, 0, sizeof(SCR));
1331 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 1503 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1332 1504
1333 phba->fc_stat.elsXmitSCR++; 1505 phba->fc_stat.elsXmitSCR++;
@@ -1358,7 +1530,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1358 1530
1359 psli = &phba->sli; 1531 psli = &phba->sli;
1360 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1532 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1361 cmdsize = (sizeof (uint32_t) + sizeof (FARP)); 1533 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
1362 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 1534 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1363 if (!ndlp) 1535 if (!ndlp)
1364 return 1; 1536 return 1;
@@ -1376,25 +1548,25 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1376 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1548 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1377 1549
1378 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 1550 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1379 pcmd += sizeof (uint32_t); 1551 pcmd += sizeof(uint32_t);
1380 1552
1381 /* Fill in FARPR payload */ 1553 /* Fill in FARPR payload */
1382 fp = (FARP *) (pcmd); 1554 fp = (FARP *) (pcmd);
1383 memset(fp, 0, sizeof (FARP)); 1555 memset(fp, 0, sizeof(FARP));
1384 lp = (uint32_t *) pcmd; 1556 lp = (uint32_t *) pcmd;
1385 *lp++ = be32_to_cpu(nportid); 1557 *lp++ = be32_to_cpu(nportid);
1386 *lp++ = be32_to_cpu(vport->fc_myDID); 1558 *lp++ = be32_to_cpu(vport->fc_myDID);
1387 fp->Rflags = 0; 1559 fp->Rflags = 0;
1388 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 1560 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1389 1561
1390 memcpy(&fp->RportName, &vport->fc_portname, sizeof (struct lpfc_name)); 1562 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
1391 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof (struct lpfc_name)); 1563 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1392 ondlp = lpfc_findnode_did(vport, nportid); 1564 ondlp = lpfc_findnode_did(vport, nportid);
1393 if (ondlp) { 1565 if (ondlp) {
1394 memcpy(&fp->OportName, &ondlp->nlp_portname, 1566 memcpy(&fp->OportName, &ondlp->nlp_portname,
1395 sizeof (struct lpfc_name)); 1567 sizeof(struct lpfc_name));
1396 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 1568 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1397 sizeof (struct lpfc_name)); 1569 sizeof(struct lpfc_name));
1398 } 1570 }
1399 1571
1400 phba->fc_stat.elsXmitFARPR++; 1572 phba->fc_stat.elsXmitFARPR++;
@@ -1470,18 +1642,17 @@ lpfc_els_retry_delay(unsigned long ptr)
1470{ 1642{
1471 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 1643 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
1472 struct lpfc_vport *vport = ndlp->vport; 1644 struct lpfc_vport *vport = ndlp->vport;
1473 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1474 struct lpfc_hba *phba = vport->phba; 1645 struct lpfc_hba *phba = vport->phba;
1475 unsigned long iflag; 1646 unsigned long flags;
1476 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 1647 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
1477 1648
1478 ndlp = (struct lpfc_nodelist *) ptr; 1649 ndlp = (struct lpfc_nodelist *) ptr;
1479 phba = ndlp->vport->phba; 1650 phba = ndlp->vport->phba;
1480 evtp = &ndlp->els_retry_evt; 1651 evtp = &ndlp->els_retry_evt;
1481 1652
1482 spin_lock_irqsave(shost->host_lock, iflag); 1653 spin_lock_irqsave(&phba->hbalock, flags);
1483 if (!list_empty(&evtp->evt_listp)) { 1654 if (!list_empty(&evtp->evt_listp)) {
1484 spin_unlock_irqrestore(shost->host_lock, iflag); 1655 spin_unlock_irqrestore(&phba->hbalock, flags);
1485 return; 1656 return;
1486 } 1657 }
1487 1658
@@ -1489,9 +1660,9 @@ lpfc_els_retry_delay(unsigned long ptr)
1489 evtp->evt = LPFC_EVT_ELS_RETRY; 1660 evtp->evt = LPFC_EVT_ELS_RETRY;
1490 list_add_tail(&evtp->evt_listp, &phba->work_list); 1661 list_add_tail(&evtp->evt_listp, &phba->work_list);
1491 if (phba->work_wait) 1662 if (phba->work_wait)
1492 wake_up(phba->work_wait); 1663 lpfc_worker_wake_up(phba);
1493 1664
1494 spin_unlock_irqrestore(shost->host_lock, iflag); 1665 spin_unlock_irqrestore(&phba->hbalock, flags);
1495 return; 1666 return;
1496} 1667}
1497 1668
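The retry-timer callback now serializes its work-queue manipulation on phba->hbalock instead of the Scsi_Host lock, and wakes the worker thread through lpfc_worker_wake_up(). The pattern, restated with the names used in the hunk:

	/* Defer the ELS retry to the worker thread (pattern from the hunk above). */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (list_empty(&evtp->evt_listp)) {
		evtp->evt = LPFC_EVT_ELS_RETRY;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);   /* replaces the direct wake_up() */
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);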
@@ -1550,6 +1721,9 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1550 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1721 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1551 } 1722 }
1552 break; 1723 break;
1724 case ELS_CMD_FDISC:
1725 lpfc_issue_els_fdisc(vport, ndlp, retry);
1726 break;
1553 } 1727 }
1554 return; 1728 return;
1555} 1729}
@@ -1598,7 +1772,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1598 switch ((irsp->un.ulpWord[4] & 0xff)) { 1772 switch ((irsp->un.ulpWord[4] & 0xff)) {
1599 case IOERR_LOOP_OPEN_FAILURE: 1773 case IOERR_LOOP_OPEN_FAILURE:
1600 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 1774 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
1601 delay = 1; 1775 delay = 1000;
1602 retry = 1; 1776 retry = 1;
1603 break; 1777 break;
1604 1778
@@ -1606,9 +1780,21 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1606 retry = 1; 1780 retry = 1;
1607 break; 1781 break;
1608 1782
1783 case IOERR_ILLEGAL_COMMAND:
1784 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) &&
1785 (cmd == ELS_CMD_FDISC)) {
1786 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1787 "%d (%d):0124 FDISC failed (3/6) retrying...\n",
1788 phba->brd_no, vport->vpi);
1789 lpfc_mbx_unreg_vpi(vport);
1790 retry = 1;
1791 /* Always retry for this case */
1792 cmdiocb->retry = 0;
1793 }
1794 break;
1795
1609 case IOERR_NO_RESOURCES: 1796 case IOERR_NO_RESOURCES:
1610 if (cmd == ELS_CMD_PLOGI) 1797 delay = 100;
1611 delay = 1;
1612 retry = 1; 1798 retry = 1;
1613 break; 1799 break;
1614 1800
@@ -1641,27 +1827,56 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1641 if (stat.un.b.lsRjtRsnCodeExp == 1827 if (stat.un.b.lsRjtRsnCodeExp ==
1642 LSEXP_CMD_IN_PROGRESS) { 1828 LSEXP_CMD_IN_PROGRESS) {
1643 if (cmd == ELS_CMD_PLOGI) { 1829 if (cmd == ELS_CMD_PLOGI) {
1644 delay = 1; 1830 delay = 1000;
1645 maxretry = 48; 1831 maxretry = 48;
1646 } 1832 }
1647 retry = 1; 1833 retry = 1;
1648 break; 1834 break;
1649 } 1835 }
1650 if (cmd == ELS_CMD_PLOGI) { 1836 if (cmd == ELS_CMD_PLOGI) {
1651 delay = 1; 1837 delay = 1000;
1652 maxretry = lpfc_max_els_tries + 1; 1838 maxretry = lpfc_max_els_tries + 1;
1653 retry = 1; 1839 retry = 1;
1654 break; 1840 break;
1655 } 1841 }
1842 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1843 (cmd == ELS_CMD_FDISC) &&
1844 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
1845 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1846 "%d (%d):0125 FDISC Failed (x%x)."
1847 " Fabric out of resources\n",
1848 phba->brd_no, vport->vpi, stat.un.lsRjtError);
1849 lpfc_vport_set_state(vport,
1850 FC_VPORT_NO_FABRIC_RSCS);
1851 }
1656 break; 1852 break;
1657 1853
1658 case LSRJT_LOGICAL_BSY: 1854 case LSRJT_LOGICAL_BSY:
1659 if (cmd == ELS_CMD_PLOGI) { 1855 if (cmd == ELS_CMD_PLOGI) {
1660 delay = 1; 1856 delay = 1000;
1661 maxretry = 48; 1857 maxretry = 48;
1858 } else if (cmd == ELS_CMD_FDISC) {
1859 /* Always retry for this case */
1860 cmdiocb->retry = 0;
1662 } 1861 }
1663 retry = 1; 1862 retry = 1;
1664 break; 1863 break;
1864
1865 case LSRJT_LOGICAL_ERR:
1866 case LSRJT_PROTOCOL_ERR:
1867 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1868 (cmd == ELS_CMD_FDISC) &&
1869 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
1870 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
1871 ) {
1872 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1873 "%d (%d):0123 FDISC Failed (x%x)."
1874 " Fabric Detected Bad WWN\n",
1875 phba->brd_no, vport->vpi, stat.un.lsRjtError);
1876 lpfc_vport_set_state(vport,
1877 FC_VPORT_FABRIC_REJ_WWN);
1878 }
1879 break;
1665 } 1880 }
1666 break; 1881 break;
1667 1882
@@ -1688,15 +1903,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1688 1903
1689 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 1904 /* Retry ELS command <elsCmd> to remote NPORT <did> */
1690 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1905 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1691 "%d:0107 Retry ELS command x%x to remote " 1906 "%d (%d):0107 Retry ELS command x%x to remote "
1692 "NPORT x%x Data: x%x x%x\n", 1907 "NPORT x%x Data: x%x x%x\n",
1693 phba->brd_no, 1908 phba->brd_no, vport->vpi,
1694 cmd, did, cmdiocb->retry, delay); 1909 cmd, did, cmdiocb->retry, delay);
1695 1910
1696 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) { 1911 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
1697 /* If discovery / RSCN timer is running, reset it */ 1912 /* If discovery / RSCN timer is running, reset it */
1698 if (timer_pending(&vport->fc_disctmo) || 1913 if (timer_pending(&vport->fc_disctmo) ||
1699 (vport->fc_flag & FC_RSCN_MODE)) 1914 (vport->fc_flag & FC_RSCN_MODE))
1700 lpfc_set_disctmo(vport); 1915 lpfc_set_disctmo(vport);
1701 } 1916 }
1702 1917
@@ -1705,7 +1920,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1705 phba->fc_stat.elsDelayRetry++; 1920 phba->fc_stat.elsDelayRetry++;
1706 ndlp->nlp_retry = cmdiocb->retry; 1921 ndlp->nlp_retry = cmdiocb->retry;
1707 1922
1708 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1923 /* delay is specified in milliseconds */
1924 mod_timer(&ndlp->nlp_delayfunc,
1925 jiffies + msecs_to_jiffies(delay));
1709 spin_lock_irq(shost->host_lock); 1926 spin_lock_irq(shost->host_lock);
1710 ndlp->nlp_flag |= NLP_DELAY_TMO; 1927 ndlp->nlp_flag |= NLP_DELAY_TMO;
1711 spin_unlock_irq(shost->host_lock); 1928 spin_unlock_irq(shost->host_lock);
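Retry delays in lpfc_els_retry() are now expressed in milliseconds (1000 for the PLOGI back-offs, 100 for IOERR_NO_RESOURCES) rather than the old one-second constant, and the timer arm converts accordingly. A minimal sketch using the same fields as the hunk:

	/* delay is specified in milliseconds throughout lpfc_els_retry() now. */
	unsigned long delay = 1000;                    /* e.g. PLOGI retry back-off */
	ndlp->nlp_retry = cmdiocb->retry;
	mod_timer(&ndlp->nlp_delayfunc,
		  jiffies + msecs_to_jiffies(delay));  /* was: jiffies + HZ */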
@@ -1720,6 +1937,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1720 case ELS_CMD_FLOGI: 1937 case ELS_CMD_FLOGI:
1721 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 1938 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
1722 return 1; 1939 return 1;
1940 case ELS_CMD_FDISC:
1941 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
1942 return 1;
1723 case ELS_CMD_PLOGI: 1943 case ELS_CMD_PLOGI:
1724 if (ndlp) { 1944 if (ndlp) {
1725 ndlp->nlp_prev_state = ndlp->nlp_state; 1945 ndlp->nlp_prev_state = ndlp->nlp_state;
@@ -1748,9 +1968,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1748 1968
1749 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 1969 /* No retry ELS command <elsCmd> to remote NPORT <did> */
1750 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1970 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1751 "%d:0108 No retry ELS command x%x to remote NPORT x%x " 1971 "%d (%d):0108 No retry ELS command x%x to remote "
1752 "Data: x%x\n", 1972 "NPORT x%x Data: x%x\n",
1753 phba->brd_no, 1973 phba->brd_no, vport->vpi,
1754 cmd, did, cmdiocb->retry); 1974 cmd, did, cmdiocb->retry);
1755 1975
1756 return 0; 1976 return 0;
@@ -1798,10 +2018,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1798 2018
1799 /* ACC to LOGO completes to NPort <nlp_DID> */ 2019 /* ACC to LOGO completes to NPort <nlp_DID> */
1800 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2020 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1801 "%d:0109 ACC to LOGO completes to NPort x%x " 2021 "%d (%d):0109 ACC to LOGO completes to NPort x%x "
1802 "Data: x%x x%x x%x\n", 2022 "Data: x%x x%x x%x\n",
1803 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 2023 phba->brd_no, vport->vpi, ndlp->nlp_DID,
1804 ndlp->nlp_state, ndlp->nlp_rpi); 2024 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1805 2025
1806 switch (ndlp->nlp_state) { 2026 switch (ndlp->nlp_state) {
1807 case NLP_STE_UNUSED_NODE: /* node is just allocated */ 2027 case NLP_STE_UNUSED_NODE: /* node is just allocated */
@@ -1848,9 +2068,9 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1848 2068
1849 /* ELS response tag <ulpIoTag> completes */ 2069 /* ELS response tag <ulpIoTag> completes */
1850 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2070 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1851 "%d:0110 ELS response tag x%x completes " 2071 "%d (%d):0110 ELS response tag x%x completes "
1852 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 2072 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
1853 phba->brd_no, 2073 phba->brd_no, vport->vpi,
1854 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 2074 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
1855 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 2075 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
1856 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2076 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
@@ -1926,7 +2146,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
1926 2146
1927 switch (flag) { 2147 switch (flag) {
1928 case ELS_CMD_ACC: 2148 case ELS_CMD_ACC:
1929 cmdsize = sizeof (uint32_t); 2149 cmdsize = sizeof(uint32_t);
1930 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 2150 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1931 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2151 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1932 if (!elsiocb) { 2152 if (!elsiocb) {
@@ -1940,10 +2160,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
1940 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2160 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1941 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2161 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1942 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2162 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1943 pcmd += sizeof (uint32_t); 2163 pcmd += sizeof(uint32_t);
1944 break; 2164 break;
1945 case ELS_CMD_PLOGI: 2165 case ELS_CMD_PLOGI:
1946 cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t)); 2166 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
1947 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 2167 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1948 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2168 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1949 if (!elsiocb) 2169 if (!elsiocb)
@@ -1957,11 +2177,11 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
1957 elsiocb->context_un.mbox = mbox; 2177 elsiocb->context_un.mbox = mbox;
1958 2178
1959 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2179 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1960 pcmd += sizeof (uint32_t); 2180 pcmd += sizeof(uint32_t);
1961 memcpy(pcmd, &vport->fc_sparam, sizeof (struct serv_parm)); 2181 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1962 break; 2182 break;
1963 case ELS_CMD_PRLO: 2183 case ELS_CMD_PRLO:
1964 cmdsize = sizeof (uint32_t) + sizeof (PRLO); 2184 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
1965 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 2185 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1966 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 2186 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
1967 if (!elsiocb) 2187 if (!elsiocb)
@@ -1972,7 +2192,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
1972 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2192 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1973 2193
1974 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 2194 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
1975 sizeof (uint32_t) + sizeof (PRLO)); 2195 sizeof(uint32_t) + sizeof(PRLO));
1976 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 2196 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
1977 els_pkt_ptr = (ELS_PKT *) pcmd; 2197 els_pkt_ptr = (ELS_PKT *) pcmd;
1978 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 2198 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
@@ -1988,9 +2208,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
1988 2208
1989 /* Xmit ELS ACC response tag <ulpIoTag> */ 2209 /* Xmit ELS ACC response tag <ulpIoTag> */
1990 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2210 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1991 "%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, " 2211 "%d (%d):0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
1992 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", 2212 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
1993 phba->brd_no, elsiocb->iotag, 2213 phba->brd_no, vport->vpi, elsiocb->iotag,
1994 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2214 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1995 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2215 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1996 2216
@@ -2029,7 +2249,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2029 psli = &phba->sli; 2249 psli = &phba->sli;
2030 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2250 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2031 2251
2032 cmdsize = 2 * sizeof (uint32_t); 2252 cmdsize = 2 * sizeof(uint32_t);
2033 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2253 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2034 ndlp->nlp_DID, ELS_CMD_LS_RJT); 2254 ndlp->nlp_DID, ELS_CMD_LS_RJT);
2035 if (!elsiocb) 2255 if (!elsiocb)
@@ -2041,14 +2261,15 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2041 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2261 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2042 2262
2043 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 2263 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
2044 pcmd += sizeof (uint32_t); 2264 pcmd += sizeof(uint32_t);
2045 *((uint32_t *) (pcmd)) = rejectError; 2265 *((uint32_t *) (pcmd)) = rejectError;
2046 2266
2047 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 2267 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2048 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2268 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2049 "%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, " 2269 "%d (%d):0129 Xmit ELS RJT x%x response tag x%x "
2050 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 2270 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2051 phba->brd_no, rejectError, elsiocb->iotag, 2271 "rpi x%x\n",
2272 phba->brd_no, vport->vpi, rejectError, elsiocb->iotag,
2052 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2273 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2053 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2274 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2054 2275
@@ -2076,7 +2297,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2076 uint16_t cmdsize; 2297 uint16_t cmdsize;
2077 int rc; 2298 int rc;
2078 2299
2079 cmdsize = sizeof (uint32_t) + sizeof (ADISC); 2300 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2080 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2301 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2081 ndlp->nlp_DID, ELS_CMD_ACC); 2302 ndlp->nlp_DID, ELS_CMD_ACC);
2082 if (!elsiocb) 2303 if (!elsiocb)
@@ -2088,21 +2309,21 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2088 2309
2089 /* Xmit ADISC ACC response tag <ulpIoTag> */ 2310 /* Xmit ADISC ACC response tag <ulpIoTag> */
2090 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2311 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2091 "%d:0130 Xmit ADISC ACC response iotag x%x xri: " 2312 "%d (%d):0130 Xmit ADISC ACC response iotag x%x xri: "
2092 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 2313 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
2093 phba->brd_no, elsiocb->iotag, 2314 phba->brd_no, vport->vpi, elsiocb->iotag,
2094 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2315 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2095 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2316 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2096 2317
2097 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2318 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2098 2319
2099 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2320 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2100 pcmd += sizeof (uint32_t); 2321 pcmd += sizeof(uint32_t);
2101 2322
2102 ap = (ADISC *) (pcmd); 2323 ap = (ADISC *) (pcmd);
2103 ap->hardAL_PA = phba->fc_pref_ALPA; 2324 ap->hardAL_PA = phba->fc_pref_ALPA;
2104 memcpy(&ap->portName, &vport->fc_portname, sizeof (struct lpfc_name)); 2325 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2105 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof (struct lpfc_name)); 2326 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2106 ap->DID = be32_to_cpu(vport->fc_myDID); 2327 ap->DID = be32_to_cpu(vport->fc_myDID);
2107 2328
2108 phba->fc_stat.elsXmitACC++; 2329 phba->fc_stat.elsXmitACC++;
@@ -2134,9 +2355,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2134 psli = &phba->sli; 2355 psli = &phba->sli;
2135 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2356 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2136 2357
2137 cmdsize = sizeof (uint32_t) + sizeof (PRLI); 2358 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2138 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2359 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2139 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 2360 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2140 if (!elsiocb) 2361 if (!elsiocb)
2141 return 1; 2362 return 1;
2142 2363
@@ -2146,19 +2367,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2146 2367
2147 /* Xmit PRLI ACC response tag <ulpIoTag> */ 2368 /* Xmit PRLI ACC response tag <ulpIoTag> */
2148 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2369 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2149 "%d:0131 Xmit PRLI ACC response tag x%x xri x%x, " 2370 "%d (%d):0131 Xmit PRLI ACC response tag x%x xri x%x, "
2150 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 2371 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2151 phba->brd_no, elsiocb->iotag, 2372 phba->brd_no, vport->vpi, elsiocb->iotag,
2152 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2373 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2153 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2374 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2154 2375
2155 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2376 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2156 2377
2157 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 2378 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2158 pcmd += sizeof (uint32_t); 2379 pcmd += sizeof(uint32_t);
2159 2380
2160 /* For PRLI, remainder of payload is PRLI parameter page */ 2381 /* For PRLI, remainder of payload is PRLI parameter page */
2161 memset(pcmd, 0, sizeof (PRLI)); 2382 memset(pcmd, 0, sizeof(PRLI));
2162 2383
2163 npr = (PRLI *) pcmd; 2384 npr = (PRLI *) pcmd;
2164 vpd = &phba->vpd; 2385 vpd = &phba->vpd;
@@ -2208,10 +2429,10 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2208 psli = &phba->sli; 2429 psli = &phba->sli;
2209 pring = &psli->ring[LPFC_ELS_RING]; 2430 pring = &psli->ring[LPFC_ELS_RING];
2210 2431
2211 cmdsize = sizeof (uint32_t) + sizeof (uint32_t) 2432 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
2212 + (2 * sizeof (struct lpfc_name)); 2433 + (2 * sizeof(struct lpfc_name));
2213 if (format) 2434 if (format)
2214 cmdsize += sizeof (RNID_TOP_DISC); 2435 cmdsize += sizeof(RNID_TOP_DISC);
2215 2436
2216 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 2437 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2217 ndlp->nlp_DID, ELS_CMD_ACC); 2438 ndlp->nlp_DID, ELS_CMD_ACC);
@@ -2224,30 +2445,30 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2224 2445
2225 /* Xmit RNID ACC response tag <ulpIoTag> */ 2446 /* Xmit RNID ACC response tag <ulpIoTag> */
2226 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2447 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2227 "%d:0132 Xmit RNID ACC response tag x%x " 2448 "%d (%d):0132 Xmit RNID ACC response tag x%x "
2228 "xri x%x\n", 2449 "xri x%x\n",
2229 phba->brd_no, elsiocb->iotag, 2450 phba->brd_no, vport->vpi, elsiocb->iotag,
2230 elsiocb->iocb.ulpContext); 2451 elsiocb->iocb.ulpContext);
2231 2452
2232 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2453 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2233 2454
2234 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2455 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2235 pcmd += sizeof (uint32_t); 2456 pcmd += sizeof(uint32_t);
2236 2457
2237 memset(pcmd, 0, sizeof (RNID)); 2458 memset(pcmd, 0, sizeof(RNID));
2238 rn = (RNID *) (pcmd); 2459 rn = (RNID *) (pcmd);
2239 rn->Format = format; 2460 rn->Format = format;
2240 rn->CommonLen = (2 * sizeof (struct lpfc_name)); 2461 rn->CommonLen = (2 * sizeof(struct lpfc_name));
2241 memcpy(&rn->portName, &vport->fc_portname, sizeof (struct lpfc_name)); 2462 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2242 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof (struct lpfc_name)); 2463 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2243 switch (format) { 2464 switch (format) {
2244 case 0: 2465 case 0:
2245 rn->SpecificLen = 0; 2466 rn->SpecificLen = 0;
2246 break; 2467 break;
2247 case RNID_TOPOLOGY_DISC: 2468 case RNID_TOPOLOGY_DISC:
2248 rn->SpecificLen = sizeof (RNID_TOP_DISC); 2469 rn->SpecificLen = sizeof(RNID_TOP_DISC);
2249 memcpy(&rn->un.topologyDisc.portName, 2470 memcpy(&rn->un.topologyDisc.portName,
2250 &vport->fc_portname, sizeof (struct lpfc_name)); 2471 &vport->fc_portname, sizeof(struct lpfc_name));
2251 rn->un.topologyDisc.unitType = RNID_HBA; 2472 rn->un.topologyDisc.unitType = RNID_HBA;
2252 rn->un.topologyDisc.physPort = 0; 2473 rn->un.topologyDisc.physPort = 0;
2253 rn->un.topologyDisc.attachedNodes = 0; 2474 rn->un.topologyDisc.attachedNodes = 0;
@@ -2344,22 +2565,15 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
2344 return sentplogi; 2565 return sentplogi;
2345} 2566}
2346 2567
2347int 2568void
2348lpfc_els_flush_rscn(struct lpfc_vport *vport) 2569lpfc_els_flush_rscn(struct lpfc_vport *vport)
2349{ 2570{
2350 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2571 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2351 struct lpfc_hba *phba = vport->phba; 2572 struct lpfc_hba *phba = vport->phba;
2352 struct lpfc_dmabuf *mp;
2353 int i; 2573 int i;
2354 2574
2355 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2575 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2356 mp = vport->fc_rscn_id_list[i]; 2576 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2357 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
2358 lpfc_sli_hbqbuf_free(phba, mp->virt, mp->phys);
2359 else {
2360 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2361 kfree(mp);
2362 }
2363 vport->fc_rscn_id_list[i] = NULL; 2577 vport->fc_rscn_id_list[i] = NULL;
2364 } 2578 }
2365 spin_lock_irq(shost->host_lock); 2579 spin_lock_irq(shost->host_lock);
@@ -2367,7 +2581,6 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
2367 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 2581 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2368 spin_unlock_irq(shost->host_lock); 2582 spin_unlock_irq(shost->host_lock);
2369 lpfc_can_disctmo(vport); 2583 lpfc_can_disctmo(vport);
2370 return 0;
2371} 2584}
2372 2585
2373int 2586int
@@ -2375,13 +2588,11 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2375{ 2588{
2376 D_ID ns_did; 2589 D_ID ns_did;
2377 D_ID rscn_did; 2590 D_ID rscn_did;
2378 struct lpfc_dmabuf *mp;
2379 uint32_t *lp; 2591 uint32_t *lp;
2380 uint32_t payload_len, cmd, i, match; 2592 uint32_t payload_len, i;
2381 struct lpfc_hba *phba = vport->phba; 2593 struct lpfc_hba *phba = vport->phba;
2382 2594
2383 ns_did.un.word = did; 2595 ns_did.un.word = did;
2384 match = 0;
2385 2596
2386 /* Never match fabric nodes for RSCNs */ 2597 /* Never match fabric nodes for RSCNs */
2387 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 2598 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
@@ -2392,45 +2603,40 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2392 return did; 2603 return did;
2393 2604
2394 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2605 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2395 mp = vport->fc_rscn_id_list[i]; 2606 lp = vport->fc_rscn_id_list[i]->virt;
2396 lp = (uint32_t *) mp->virt; 2607 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
2397 cmd = *lp++; 2608 payload_len -= sizeof(uint32_t); /* take off word 0 */
2398 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2399 payload_len -= sizeof (uint32_t); /* take off word 0 */
2400 while (payload_len) { 2609 while (payload_len) {
2401 rscn_did.un.word = *lp++; 2610 rscn_did.un.word = be32_to_cpu(*lp++);
2402 rscn_did.un.word = be32_to_cpu(rscn_did.un.word); 2611 payload_len -= sizeof(uint32_t);
2403 payload_len -= sizeof (uint32_t);
2404 switch (rscn_did.un.b.resv) { 2612 switch (rscn_did.un.b.resv) {
2405 case 0: /* Single N_Port ID effected */ 2613 case 0: /* Single N_Port ID effected */
2406 if (ns_did.un.word == rscn_did.un.word) 2614 if (ns_did.un.word == rscn_did.un.word)
2407 match = did; 2615 return did;
2408 break; 2616 break;
2409 case 1: /* Whole N_Port Area effected */ 2617 case 1: /* Whole N_Port Area effected */
2410 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 2618 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
2411 && (ns_did.un.b.area == rscn_did.un.b.area)) 2619 && (ns_did.un.b.area == rscn_did.un.b.area))
2412 match = did; 2620 return did;
2413 break; 2621 break;
2414 case 2: /* Whole N_Port Domain effected */ 2622 case 2: /* Whole N_Port Domain effected */
2415 if (ns_did.un.b.domain == rscn_did.un.b.domain) 2623 if (ns_did.un.b.domain == rscn_did.un.b.domain)
2416 match = did; 2624 return did;
2417 break;
2418 case 3: /* Whole Fabric effected */
2419 match = did;
2420 break; 2625 break;
2421 default: 2626 default:
2422 /* Unknown Identifier in RSCN node */ 2627 /* Unknown Identifier in RSCN node */
2423 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2628 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2424 "%d:0217 Unknown Identifier in " 2629 "%d (%d):0217 Unknown "
2425 "RSCN payload Data: x%x\n", 2630 "Identifier in RSCN payload "
2426 phba->brd_no, rscn_did.un.word); 2631 "Data: x%x\n",
2427 break; 2632 phba->brd_no, vport->vpi,
2428 } 2633 rscn_did.un.word);
2429 if (match) 2634 case 3: /* Whole Fabric effected */
2430 break; 2635 return did;
2431 } 2636 }
2432 } 2637 }
2433 return match; 2638 }
2639 return 0;
2434} 2640}
2435 2641
2436static int 2642static int
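
The reworked lpfc_rscn_payload_check() above walks each 32-bit RSCN entry and returns the DID as soon as an entry's address format (single port, area, domain, or whole fabric) covers it, instead of carrying the old `match` variable; note that the unknown-format case now falls through into the fabric-wide case. A minimal user-space sketch of that matching rule, with illustrative names in place of the driver's structures:

#include <stdint.h>
#include <stdio.h>

/* One RSCN "affected address" entry: the top byte carries the page format
 * (the driver switches on it directly), the low 24 bits are domain/area/port.
 */
static int rscn_entry_covers(uint32_t entry, uint32_t did)
{
	switch ((entry >> 24) & 0xff) {
	case 0:	/* single N_Port affected: full 24-bit match */
		return (entry & 0xffffff) == (did & 0xffffff);
	case 1:	/* whole area affected: domain and area must match */
		return (entry & 0xffff00) == (did & 0xffff00);
	case 2:	/* whole domain affected: domain byte must match */
		return (entry & 0xff0000) == (did & 0xff0000);
	case 3:	/* whole fabric affected */
		return 1;
	default:	/* unknown format: treated as fabric-wide, as in the patch */
		return 1;
	}
}

int main(void)
{
	uint32_t did = 0x010203;	/* domain 01, area 02, port 03 */

	printf("%d\n", rscn_entry_covers(0x00010203, did));	/* 1: exact match  */
	printf("%d\n", rscn_entry_covers(0x01010200, did));	/* 1: same area    */
	printf("%d\n", rscn_entry_covers(0x02020000, did));	/* 0: other domain */
	printf("%d\n", rscn_entry_covers(0x03000000, did));	/* 1: whole fabric */
	return 0;
}
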
@@ -2448,7 +2654,7 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
2448 continue; 2654 continue;
2449 2655
2450 lpfc_disc_state_machine(vport, ndlp, NULL, 2656 lpfc_disc_state_machine(vport, ndlp, NULL,
2451 NLP_EVT_DEVICE_RECOVERY); 2657 NLP_EVT_DEVICE_RECOVERY);
2452 2658
2453 /* 2659 /*
2454 * Make sure NLP_DELAY_TMO is NOT running after a device 2660 * Make sure NLP_DELAY_TMO is NOT running after a device
@@ -2468,25 +2674,26 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2468 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2674 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2469 struct lpfc_hba *phba = vport->phba; 2675 struct lpfc_hba *phba = vport->phba;
2470 struct lpfc_dmabuf *pcmd; 2676 struct lpfc_dmabuf *pcmd;
2471 uint32_t *lp; 2677 struct lpfc_vport *next_vport;
2678 uint32_t *lp, *datap;
2472 IOCB_t *icmd; 2679 IOCB_t *icmd;
2473 uint32_t payload_len, cmd; 2680 uint32_t payload_len, length, nportid, *cmd;
2681 int rscn_cnt = vport->fc_rscn_id_cnt;
2682 int rscn_id = 0, hba_id = 0;
2474 int i; 2683 int i;
2475 2684
2476 icmd = &cmdiocb->iocb; 2685 icmd = &cmdiocb->iocb;
2477 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2686 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2478 lp = (uint32_t *) pcmd->virt; 2687 lp = (uint32_t *) pcmd->virt;
2479 2688
2480 cmd = *lp++; 2689 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
2481 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */ 2690 payload_len -= sizeof(uint32_t); /* take off word 0 */
2482 payload_len -= sizeof (uint32_t); /* take off word 0 */
2483 cmd &= ELS_CMD_MASK;
2484 2691
2485 /* RSCN received */ 2692 /* RSCN received */
2486 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2693 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2487 "%d:0214 RSCN received Data: x%x x%x x%x x%x\n", 2694 "%d (%d):0214 RSCN received Data: x%x x%x x%x x%x\n",
2488 phba->brd_no, vport->fc_flag, payload_len, *lp, 2695 phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
2489 vport->fc_rscn_id_cnt); 2696 *lp, rscn_cnt);
2490 2697
2491 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 2698 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2492 fc_host_post_event(shost, fc_get_event_number(), 2699 fc_host_post_event(shost, fc_get_event_number(),
@@ -2497,32 +2704,77 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2497 */ 2704 */
2498 if (vport->port_state <= LPFC_NS_QRY) { 2705 if (vport->port_state <= LPFC_NS_QRY) {
2499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 2706 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2500 newnode); 2707 newnode);
2501 return 0; 2708 return 0;
2502 } 2709 }
2503 2710
2711 /* If this RSCN just contains NPortIDs for other vports on this HBA,
2712 * just ACC and ignore it.
2713 */
2714 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2715 !(phba->cfg_peer_port_login)) {
2716 i = payload_len;
2717 datap = lp;
2718 while (i > 0) {
2719 nportid = *datap++;
2720 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
2721 i -= sizeof(uint32_t);
2722 rscn_id++;
2723 list_for_each_entry(next_vport, &phba->port_list,
2724 listentry) {
2725 if (nportid == next_vport->fc_myDID) {
2726 hba_id++;
2727 break;
2728 }
2729 }
2730 }
2731 if (rscn_id == hba_id) {
2732 /* ALL NPortIDs in RSCN are on HBA */
2733 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2734 "%d (%d):0214 Ignore RSCN Data: x%x x%x x%x x%x\n",
2735 phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
2736 *lp, rscn_cnt);
2737 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
2738 ndlp, NULL, newnode);
2739 return 0;
2740 }
2741 }
2742
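
Under NPIV, an RSCN can be reporting nothing but address changes caused by this same adapter's other virtual ports logging in or out; the block just above counts how many payload entries match a local vport DID and, when every entry matches, simply ACCs and ignores the event. A rough sketch of that "all entries are local" test, assuming invented helper names and POSIX byte-order helpers in place of the driver's types:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl()/htonl() stand in for be32_to_cpu() */

#define DID_MASK 0x00ffffff	/* low 24 bits of each RSCN entry */

/* Return 1 if every N_Port ID in the RSCN payload belongs to one of the
 * local ports in local_dids[], 0 otherwise.  'words' points at the RSCN
 * entries as they arrived (big-endian), 'nwords' is the entry count.
 */
static int rscn_only_local(const uint32_t *words, size_t nwords,
			   const uint32_t *local_dids, size_t nlocal)
{
	for (size_t i = 0; i < nwords; i++) {
		uint32_t did = ntohl(words[i]) & DID_MASK;
		int found = 0;

		for (size_t j = 0; j < nlocal; j++) {
			if (local_dids[j] == did) {
				found = 1;
				break;
			}
		}
		if (!found)
			return 0;	/* at least one remote port changed */
	}
	return 1;		/* nothing but our own vports: ACC and ignore */
}

int main(void)
{
	uint32_t payload[2] = { htonl(0x010001), htonl(0x010002) };
	uint32_t mine[2] = { 0x010001, 0x010002 };

	printf("ignore RSCN: %d\n", rscn_only_local(payload, 2, mine, 2));
	return 0;
}
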
2504 /* If we are already processing an RSCN, save the received 2743 /* If we are already processing an RSCN, save the received
2505 * RSCN payload buffer, cmdiocb->context2 to process later. 2744 * RSCN payload buffer, cmdiocb->context2 to process later.
2506 */ 2745 */
2507 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 2746 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
2508 if ((vport->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) && 2747 vport->fc_flag |= FC_RSCN_DEFERRED;
2748 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
2509 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 2749 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
2510 spin_lock_irq(shost->host_lock); 2750 spin_lock_irq(shost->host_lock);
2511 vport->fc_flag |= FC_RSCN_MODE; 2751 vport->fc_flag |= FC_RSCN_MODE;
2512 spin_unlock_irq(shost->host_lock); 2752 spin_unlock_irq(shost->host_lock);
2513 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 2753 if (rscn_cnt) {
2514 2754 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
2515 /* If we zero, cmdiocb->context2, the calling 2755 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
2516 * routine will not try to free it. 2756 }
2517 */ 2757 if ((rscn_cnt) &&
2518 cmdiocb->context2 = NULL; 2758 (payload_len + length <= LPFC_BPL_SIZE)) {
2759 *cmd &= ELS_CMD_MASK;
2760 *cmd |= be32_to_cpu(payload_len + length);
2761 memcpy(((uint8_t *)cmd) + length, lp,
2762 payload_len);
2763 } else {
2764 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
2765 vport->fc_rscn_id_cnt++;
2766 /* If we zero, cmdiocb->context2, the calling
2767 * routine will not try to free it.
2768 */
2769 cmdiocb->context2 = NULL;
2770 }
2519 2771
2520 /* Deferred RSCN */ 2772 /* Deferred RSCN */
2521 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2773 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2522 "%d:0235 Deferred RSCN " 2774 "%d (%d):0235 Deferred RSCN "
2523 "Data: x%x x%x x%x\n", 2775 "Data: x%x x%x x%x\n",
2524 phba->brd_no, vport->fc_rscn_id_cnt, 2776 phba->brd_no, vport->vpi,
2525 vport->fc_flag, 2777 vport->fc_rscn_id_cnt, vport->fc_flag,
2526 vport->port_state); 2778 vport->port_state);
2527 } else { 2779 } else {
2528 spin_lock_irq(shost->host_lock); 2780 spin_lock_irq(shost->host_lock);
@@ -2530,10 +2782,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2530 spin_unlock_irq(shost->host_lock); 2782 spin_unlock_irq(shost->host_lock);
2531 /* ReDiscovery RSCN */ 2783 /* ReDiscovery RSCN */
2532 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2784 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2533 "%d:0234 ReDiscovery RSCN " 2785 "%d (%d):0234 ReDiscovery RSCN "
2534 "Data: x%x x%x x%x\n", 2786 "Data: x%x x%x x%x\n",
2535 phba->brd_no, vport->fc_rscn_id_cnt, 2787 phba->brd_no, vport->vpi,
2536 vport->fc_flag, 2788 vport->fc_rscn_id_cnt, vport->fc_flag,
2537 vport->port_state); 2789 vport->port_state);
2538 } 2790 }
2539 /* Send back ACC */ 2791 /* Send back ACC */
@@ -2542,6 +2794,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2542 2794
2543 /* send RECOVERY event for ALL nodes that match RSCN payload */ 2795 /* send RECOVERY event for ALL nodes that match RSCN payload */
2544 lpfc_rscn_recovery_check(vport); 2796 lpfc_rscn_recovery_check(vport);
2797 vport->fc_flag &= ~FC_RSCN_DEFERRED;
2545 return 0; 2798 return 0;
2546 } 2799 }
2547 2800
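
When an RSCN arrives while a previous one is still being processed, the payload is deferred; rather than always consuming another slot in fc_rscn_id_list, the new code appends the incoming entries to the most recently saved buffer whenever the combined payload still fits in one buffer, patching the length carried in word 0. A simplified sketch of that append-if-it-fits bookkeeping, assuming fixed-size buffers and host-order lengths instead of the driver's big-endian word 0 and LPFC_BPL_SIZE buffers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE	1024	/* stand-in for the driver's buffer size */
#define MAX_HOLD	16	/* stand-in for FC_MAX_HOLD_RSCN         */

struct rscn_buf {
	uint32_t payload_len;	/* bytes of entries (word 0 in the driver) */
	uint8_t  data[BUF_SIZE];
};

static struct rscn_buf saved[MAX_HOLD];
static int saved_cnt;

/* Defer one incoming RSCN: append to the newest saved buffer when the
 * combined payload still fits, otherwise take a fresh slot.  Returns 0 on
 * success, -1 when every slot is used (the driver then falls back to a
 * full rediscovery instead of tracking individual IDs).
 */
static int defer_rscn(const uint8_t *entries, uint32_t len)
{
	if (saved_cnt &&
	    saved[saved_cnt - 1].payload_len + len <= BUF_SIZE) {
		struct rscn_buf *last = &saved[saved_cnt - 1];

		memcpy(last->data + last->payload_len, entries, len);
		last->payload_len += len;	/* patch the stored length */
		return 0;
	}
	if (saved_cnt >= MAX_HOLD || len > BUF_SIZE)
		return -1;

	memcpy(saved[saved_cnt].data, entries, len);
	saved[saved_cnt].payload_len = len;
	saved_cnt++;
	return 0;
}

int main(void)
{
	uint8_t a[8] = { 0 }, b[8] = { 0 };

	defer_rscn(a, sizeof(a));
	defer_rscn(b, sizeof(b));	/* coalesced into the first buffer */
	printf("buffers used: %d, first length: %u\n",
	       saved_cnt, saved[0].payload_len);
	return 0;
}
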
@@ -2572,13 +2825,19 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
2572 struct lpfc_nodelist *ndlp; 2825 struct lpfc_nodelist *ndlp;
2573 struct lpfc_hba *phba = vport->phba; 2826 struct lpfc_hba *phba = vport->phba;
2574 2827
2828 /* Ignore RSCN if the port is being torn down. */
2829 if (vport->load_flag & FC_UNLOADING) {
2830 lpfc_els_flush_rscn(vport);
2831 return 0;
2832 }
2833
2575 /* Start timer for RSCN processing */ 2834 /* Start timer for RSCN processing */
2576 lpfc_set_disctmo(vport); 2835 lpfc_set_disctmo(vport);
2577 2836
2578 /* RSCN processed */ 2837 /* RSCN processed */
2579 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2838 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2580 "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n", 2839 "%d (%d):0215 RSCN processed Data: x%x x%x x%x x%x\n",
2581 phba->brd_no, 2840 phba->brd_no, vport->vpi,
2582 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 2841 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
2583 vport->port_state); 2842 vport->port_state);
2584 2843
@@ -2587,7 +2846,7 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
2587 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2846 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2588 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 2847 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
2589 /* Good ndlp, issue CT Request to NameServer */ 2848 /* Good ndlp, issue CT Request to NameServer */
2590 if (lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT) == 0) 2849 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
2591 /* Wait for NameServer query cmpl before we can 2850 /* Wait for NameServer query cmpl before we can
2592 continue */ 2851 continue */
2593 return 1; 2852 return 1;
@@ -2649,9 +2908,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2649 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 2908 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
2650 Loop Mode */ 2909 Loop Mode */
2651 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 2910 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
2652 "%d:0113 An FLOGI ELS command x%x was received " 2911 "%d (%d):0113 An FLOGI ELS command x%x was "
2653 "from DID x%x in Loop Mode\n", 2912 "received from DID x%x in Loop Mode\n",
2654 phba->brd_no, cmd, did); 2913 phba->brd_no, vport->vpi, cmd, did);
2655 return 1; 2914 return 1;
2656 } 2915 }
2657 2916
@@ -2663,7 +2922,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2663 */ 2922 */
2664 2923
2665 rc = memcmp(&vport->fc_portname, &sp->portName, 2924 rc = memcmp(&vport->fc_portname, &sp->portName,
2666 sizeof (struct lpfc_name)); 2925 sizeof(struct lpfc_name));
2667 2926
2668 if (!rc) { 2927 if (!rc) {
2669 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2928 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -2802,7 +3061,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2802 3061
2803 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3062 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2804 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3063 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2805 pcmd += sizeof (uint32_t); /* Skip past command */ 3064 pcmd += sizeof(uint32_t); /* Skip past command */
2806 rps_rsp = (RPS_RSP *)pcmd; 3065 rps_rsp = (RPS_RSP *)pcmd;
2807 3066
2808 if (phba->fc_topology != TOPOLOGY_LOOP) 3067 if (phba->fc_topology != TOPOLOGY_LOOP)
@@ -2823,9 +3082,10 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2823 3082
2824 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 3083 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
2825 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3084 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2826 "%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 3085 "%d (%d):0118 Xmit ELS RPS ACC response tag x%x "
2827 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 3086 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2828 phba->brd_no, elsiocb->iotag, 3087 "rpi x%x\n",
3088 phba->brd_no, ndlp->vport->vpi, elsiocb->iotag,
2829 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 3089 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2830 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 3090 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2831 3091
@@ -2865,14 +3125,17 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2865 if ((flag == 0) || 3125 if ((flag == 0) ||
2866 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) || 3126 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
2867 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname, 3127 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
2868 sizeof (struct lpfc_name)) == 0))) { 3128 sizeof(struct lpfc_name)) == 0))) {
2869 3129
3130 printk("Fix me....\n");
3131 dump_stack();
2870 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 3132 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
2871 if (mbox) { 3133 if (mbox) {
2872 lpfc_read_lnk_stat(phba, mbox); 3134 lpfc_read_lnk_stat(phba, mbox);
2873 mbox->context1 = 3135 mbox->context1 =
2874 (void *)((unsigned long)cmdiocb->iocb.ulpContext); 3136 (void *)((unsigned long) cmdiocb->iocb.ulpContext);
2875 mbox->context2 = lpfc_nlp_get(ndlp); 3137 mbox->context2 = lpfc_nlp_get(ndlp);
3138 mbox->vport = vport;
2876 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc; 3139 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
2877 if (lpfc_sli_issue_mbox (phba, mbox, 3140 if (lpfc_sli_issue_mbox (phba, mbox,
2878 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) 3141 (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED)
@@ -2915,7 +3178,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
2915 3178
2916 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3179 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2917 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 3180 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2918 pcmd += sizeof (uint16_t); 3181 pcmd += sizeof(uint16_t);
2919 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 3182 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
2920 pcmd += sizeof(uint16_t); 3183 pcmd += sizeof(uint16_t);
2921 3184
@@ -2932,9 +3195,10 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
2932 3195
2933 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 3196 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
2934 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3197 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2935 "%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, " 3198 "%d (%d):0120 Xmit ELS RPL ACC response tag x%x "
2936 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 3199 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2937 phba->brd_no, elsiocb->iotag, 3200 "rpi x%x\n",
3201 phba->brd_no, vport->vpi, elsiocb->iotag,
2938 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 3202 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2939 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 3203 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2940 3204
@@ -3008,8 +3272,8 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3008 3272
3009 /* FARP-REQ received from DID <did> */ 3273 /* FARP-REQ received from DID <did> */
3010 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3274 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3011 "%d:0601 FARP-REQ received from DID x%x\n", 3275 "%d (%d):0601 FARP-REQ received from DID x%x\n",
3012 phba->brd_no, did); 3276 phba->brd_no, vport->vpi, did);
3013 3277
3014 /* We will only support match on WWPN or WWNN */ 3278 /* We will only support match on WWPN or WWNN */
3015 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 3279 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
@@ -3020,14 +3284,14 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3020 /* If this FARP command is searching for my portname */ 3284 /* If this FARP command is searching for my portname */
3021 if (fp->Mflags & FARP_MATCH_PORT) { 3285 if (fp->Mflags & FARP_MATCH_PORT) {
3022 if (memcmp(&fp->RportName, &vport->fc_portname, 3286 if (memcmp(&fp->RportName, &vport->fc_portname,
3023 sizeof (struct lpfc_name)) == 0) 3287 sizeof(struct lpfc_name)) == 0)
3024 cnt = 1; 3288 cnt = 1;
3025 } 3289 }
3026 3290
3027 /* If this FARP command is searching for my nodename */ 3291 /* If this FARP command is searching for my nodename */
3028 if (fp->Mflags & FARP_MATCH_NODE) { 3292 if (fp->Mflags & FARP_MATCH_NODE) {
3029 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 3293 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
3030 sizeof (struct lpfc_name)) == 0) 3294 sizeof(struct lpfc_name)) == 0)
3031 cnt = 1; 3295 cnt = 1;
3032 } 3296 }
3033 3297
@@ -3068,8 +3332,8 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3068 cmd = *lp++; 3332 cmd = *lp++;
3069 /* FARP-RSP received from DID <did> */ 3333 /* FARP-RSP received from DID <did> */
3070 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3334 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3071 "%d:0600 FARP-RSP received from DID x%x\n", 3335 "%d (%d):0600 FARP-RSP received from DID x%x\n",
3072 phba->brd_no, did); 3336 phba->brd_no, vport->vpi, did);
3073 /* ACCEPT the Farp resp request */ 3337 /* ACCEPT the Farp resp request */
3074 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 3338 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
3075 3339
@@ -3090,8 +3354,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3090 3354
3091 /* FAN received */ 3355 /* FAN received */
3092 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3356 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3093 "%d:0265 FAN received\n", 3357 "%d (%d):0265 FAN received\n",
3094 phba->brd_no); 3358 phba->brd_no, vport->vpi);
3095 3359
3096 icmd = &cmdiocb->iocb; 3360 icmd = &cmdiocb->iocb;
3097 did = icmd->un.elsreq64.remoteID; 3361 did = icmd->un.elsreq64.remoteID;
@@ -3099,7 +3363,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3099 lp = (uint32_t *)pcmd->virt; 3363 lp = (uint32_t *)pcmd->virt;
3100 3364
3101 cmd = *lp++; 3365 cmd = *lp++;
3102 fp = (FAN *)lp; 3366 fp = (FAN *) lp;
3103 3367
3104 /* FAN received; Fan does not have a reply sequence */ 3368 /* FAN received; Fan does not have a reply sequence */
3105 3369
@@ -3178,10 +3442,15 @@ lpfc_els_timeout(unsigned long ptr)
3178 spin_lock_irqsave(&vport->work_port_lock, iflag); 3442 spin_lock_irqsave(&vport->work_port_lock, iflag);
3179 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) { 3443 if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
3180 vport->work_port_events |= WORKER_ELS_TMO; 3444 vport->work_port_events |= WORKER_ELS_TMO;
3445 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3446
3447 spin_lock_irqsave(&phba->hbalock, iflag);
3181 if (phba->work_wait) 3448 if (phba->work_wait)
3182 wake_up(phba->work_wait); 3449 lpfc_worker_wake_up(phba);
3450 spin_unlock_irqrestore(&phba->hbalock, iflag);
3183 } 3451 }
3184 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 3452 else
3453 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3185 return; 3454 return;
3186} 3455}
3187 3456
@@ -3221,17 +3490,19 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
3221 if (pcmd) 3490 if (pcmd)
3222 els_command = *(uint32_t *) (pcmd->virt); 3491 els_command = *(uint32_t *) (pcmd->virt);
3223 3492
3224 if ((els_command == ELS_CMD_FARP) 3493 if (els_command == ELS_CMD_FARP ||
3225 || (els_command == ELS_CMD_FARPR)) { 3494 els_command == ELS_CMD_FARPR ||
3495 els_command == ELS_CMD_FDISC)
3496 continue;
3497
3498 if (vport != piocb->vport)
3226 continue; 3499 continue;
3227 }
3228 3500
3229 if (piocb->drvrTimeout > 0) { 3501 if (piocb->drvrTimeout > 0) {
3230 if (piocb->drvrTimeout >= timeout) { 3502 if (piocb->drvrTimeout >= timeout)
3231 piocb->drvrTimeout -= timeout; 3503 piocb->drvrTimeout -= timeout;
3232 } else { 3504 else
3233 piocb->drvrTimeout = 0; 3505 piocb->drvrTimeout = 0;
3234 }
3235 continue; 3506 continue;
3236 } 3507 }
3237 3508
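
The timeout handler above runs periodically: every outstanding ELS command carries a driver timeout that is decremented by the poll interval, and only commands already at zero are aborted; the rework also skips FDISC (in addition to FARP/FARPR) and anything owned by another vport. A compact sketch of that sweep, with made-up command types and fields:

#include <stdio.h>

enum cmd_type { CMD_PLOGI, CMD_FARP, CMD_FARPR, CMD_FDISC };

struct pending_cmd {
	enum cmd_type type;
	int owner;		/* which port issued it                  */
	int remaining;		/* seconds left before it may be aborted */
	int active;
};

/* One pass of the timeout sweep for 'port': commands with time left are
 * only decremented; commands already at zero are aborted.  FARP, FARPR
 * and FDISC are exempt, as are commands owned by other ports.
 */
static void timeout_sweep(struct pending_cmd *cmds, int n,
			  int port, int interval)
{
	for (int i = 0; i < n; i++) {
		struct pending_cmd *c = &cmds[i];

		if (!c->active)
			continue;
		if (c->type == CMD_FARP || c->type == CMD_FARPR ||
		    c->type == CMD_FDISC)
			continue;
		if (c->owner != port)
			continue;
		if (c->remaining > 0) {
			c->remaining = (c->remaining > interval) ?
				       c->remaining - interval : 0;
			continue;
		}
		printf("command %d timed out, aborting\n", i);
		c->active = 0;
	}
}

int main(void)
{
	struct pending_cmd cmds[] = {
		{ CMD_PLOGI, 0, 3, 1 },		/* aborted on the third sweep */
		{ CMD_FDISC, 0, 1, 1 },		/* exempt from the timeout    */
	};

	for (int pass = 0; pass < 3; pass++)
		timeout_sweep(cmds, 2, 0, 2);
	return 0;
}
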
@@ -3245,11 +3516,10 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
3245 remote_ID = ndlp->nlp_DID; 3516 remote_ID = ndlp->nlp_DID;
3246 } 3517 }
3247 3518
3248 lpfc_printf_log(phba, 3519 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3249 KERN_ERR, 3520 "%d (%d):0127 ELS timeout Data: x%x x%x x%x "
3250 LOG_ELS, 3521 "x%x\n",
3251 "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n", 3522 phba->brd_no, vport->vpi, els_command,
3252 phba->brd_no, els_command,
3253 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 3523 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
3254 3524
3255 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 3525 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
@@ -3268,6 +3538,11 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
3268 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3538 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3269 struct lpfc_iocbq *tmp_iocb, *piocb; 3539 struct lpfc_iocbq *tmp_iocb, *piocb;
3270 IOCB_t *cmd = NULL; 3540 IOCB_t *cmd = NULL;
3541 struct lpfc_dmabuf *pcmd;
3542 uint32_t *elscmd;
3543 uint32_t els_command;
3544
3545 lpfc_fabric_abort_vport(vport);
3271 3546
3272 spin_lock_irq(&phba->hbalock); 3547 spin_lock_irq(&phba->hbalock);
3273 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 3548 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
@@ -3284,6 +3559,10 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
3284 cmd->ulpCommand == CMD_ABORT_XRI_CN) 3559 cmd->ulpCommand == CMD_ABORT_XRI_CN)
3285 continue; 3560 continue;
3286 3561
3562 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3563 elscmd = (uint32_t *) (pcmd->virt);
3564 els_command = *elscmd;
3565
3287 if (piocb->vport != vport) 3566 if (piocb->vport != vport)
3288 continue; 3567 continue;
3289 3568
@@ -3306,7 +3585,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
3306 while (!list_empty(&completions)) { 3585 while (!list_empty(&completions)) {
3307 piocb = list_get_first(&completions, struct lpfc_iocbq, list); 3586 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
3308 cmd = &piocb->iocb; 3587 cmd = &piocb->iocb;
3309 list_del(&piocb->list); 3588 list_del_init(&piocb->list);
3310 3589
3311 if (!piocb->iocb_cmpl) 3590 if (!piocb->iocb_cmpl)
3312 lpfc_sli_release_iocbq(phba, piocb); 3591 lpfc_sli_release_iocbq(phba, piocb);
@@ -3322,21 +3601,20 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
3322 3601
3323static void 3602static void
3324lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3603lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3325 struct lpfc_vport *vport, struct lpfc_dmabuf *mp, 3604 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
3326 struct lpfc_iocbq *elsiocb)
3327{ 3605{
3328 struct lpfc_nodelist *ndlp; 3606 struct lpfc_nodelist *ndlp;
3329 struct ls_rjt stat; 3607 struct ls_rjt stat;
3330 uint32_t *lp; 3608 uint32_t *payload;
3331 uint32_t cmd, did, newnode, rjt_err = 0; 3609 uint32_t cmd, did, newnode, rjt_err = 0;
3332 IOCB_t *icmd = &elsiocb->iocb; 3610 IOCB_t *icmd = &elsiocb->iocb;
3333 3611
3334 if (!vport || !mp) 3612 if (vport == NULL || elsiocb->context2 == NULL)
3335 goto dropit; 3613 goto dropit;
3336 3614
3337 newnode = 0; 3615 newnode = 0;
3338 lp = (uint32_t *) mp->virt; 3616 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
3339 cmd = *lp++; 3617 cmd = *payload;
3340 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 3618 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
3341 lpfc_post_buffer(phba, pring, 1, 1); 3619 lpfc_post_buffer(phba, pring, 1, 1);
3342 3620
@@ -3347,6 +3625,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3347 if (lpfc_els_chk_latt(vport)) 3625 if (lpfc_els_chk_latt(vport))
3348 goto dropit; 3626 goto dropit;
3349 3627
 3628 /* Ignore traffic received during vport shutdown. */
3629 if (vport->load_flag & FC_UNLOADING)
3630 goto dropit;
3631
3350 did = icmd->un.rcvels.remoteID; 3632 did = icmd->un.rcvels.remoteID;
3351 ndlp = lpfc_findnode_did(vport, did); 3633 ndlp = lpfc_findnode_did(vport, did);
3352 if (!ndlp) { 3634 if (!ndlp) {
@@ -3367,7 +3649,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3367 if (elsiocb->context1) 3649 if (elsiocb->context1)
3368 lpfc_nlp_put(elsiocb->context1); 3650 lpfc_nlp_put(elsiocb->context1);
3369 elsiocb->context1 = lpfc_nlp_get(ndlp); 3651 elsiocb->context1 = lpfc_nlp_get(ndlp);
3370 elsiocb->context2 = mp;
3371 elsiocb->vport = vport; 3652 elsiocb->vport = vport;
3372 3653
3373 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 3654 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
@@ -3375,18 +3656,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3375 } 3656 }
3376 /* ELS command <elsCmd> received from NPORT <did> */ 3657 /* ELS command <elsCmd> received from NPORT <did> */
3377 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3658 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3378 "%d:0112 ELS command x%x received from NPORT x%x " 3659 "%d (%d):0112 ELS command x%x received from NPORT x%x "
3379 "Data: x%x\n", phba->brd_no, cmd, did, 3660 "Data: x%x\n", phba->brd_no, vport->vpi, cmd, did,
3380 vport->port_state); 3661 vport->port_state);
3381 3662
3382 switch (cmd) { 3663 switch (cmd) {
3383 case ELS_CMD_PLOGI: 3664 case ELS_CMD_PLOGI:
3384 phba->fc_stat.elsRcvPLOGI++; 3665 phba->fc_stat.elsRcvPLOGI++;
3385 if (vport->port_state < LPFC_DISC_AUTH) { 3666 if ((vport->port_state < LPFC_DISC_AUTH) ||
3386 rjt_err = 1; 3667 ((vport->port_type == LPFC_NPIV_PORT &&
3668 phba->cfg_vport_restrict_login))) {
3669 rjt_err = 2;
3387 break; 3670 break;
3388 } 3671 }
3389 ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp); 3672 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
3390 lpfc_disc_state_machine(vport, ndlp, elsiocb, 3673 lpfc_disc_state_machine(vport, ndlp, elsiocb,
3391 NLP_EVT_RCV_PLOGI); 3674 NLP_EVT_RCV_PLOGI);
3392 break; 3675 break;
@@ -3482,13 +3765,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3482 break; 3765 break;
3483 default: 3766 default:
3484 /* Unsupported ELS command, reject */ 3767 /* Unsupported ELS command, reject */
3485 rjt_err = 1; 3768 rjt_err = 2;
3486 3769
3487 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 3770 /* Unknown ELS command <elsCmd> received from NPORT <did> */
3488 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 3771 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3489 "%d:0115 Unknown ELS command x%x " 3772 "%d (%d):0115 Unknown ELS command x%x "
3490 "received from NPORT x%x\n", 3773 "received from NPORT x%x\n",
3491 phba->brd_no, cmd, did); 3774 phba->brd_no, vport->vpi, cmd, did);
3492 if (newnode) 3775 if (newnode)
3493 lpfc_drop_node(vport, ndlp); 3776 lpfc_drop_node(vport, ndlp);
3494 break; 3777 break;
@@ -3496,96 +3779,742 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3496 3779
3497 /* check if need to LS_RJT received ELS cmd */ 3780 /* check if need to LS_RJT received ELS cmd */
3498 if (rjt_err) { 3781 if (rjt_err) {
3499 stat.un.b.lsRjtRsvd0 = 0; 3782 memset(&stat, 0, sizeof(stat));
3500 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3783 if (rjt_err == 1)
3784 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3785 else
3786 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
3501 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3787 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3502 stat.un.b.vendorUnique = 0;
3503 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp); 3788 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp);
3789 if (newnode)
3790 lpfc_drop_node(vport, ndlp);
3504 } 3791 }
3505 3792
3506 return; 3793 return;
3507 3794
3508dropit: 3795dropit:
3509 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 3796 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3510 "%d:0111 Dropping received ELS cmd " 3797 "%d (%d):0111 Dropping received ELS cmd "
3511 "Data: x%x x%x x%x\n", 3798 "Data: x%x x%x x%x\n",
3512 phba->brd_no, 3799 phba->brd_no, vport ? vport->vpi : 0xffff,
3513 icmd->ulpStatus, icmd->un.ulpWord[4], 3800 icmd->ulpStatus, icmd->un.ulpWord[4],
3514 icmd->ulpTimeout); 3801 icmd->ulpTimeout);
3515 phba->fc_stat.elsRcvDrop++; 3802 phba->fc_stat.elsRcvDrop++;
3516} 3803}
3517 3804
3805static struct lpfc_vport *
3806lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
3807{
3808 struct lpfc_vport *vport;
3809
3810 list_for_each_entry(vport, &phba->port_list, listentry) {
3811 if (vport->vpi == vpi)
3812 return vport;
3813 }
3814 return NULL;
3815}
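
With NPIV enabled the firmware tags received ELS/sequence IOCBs with the VPI of the owning virtual port, and lpfc_find_vport_by_vpid() above resolves that tag; a VPI of 0xffff means the frame is untagged and belongs to the physical port. A tiny sketch of that demultiplexing step, with invented types (the real lookup walks phba->port_list):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define VPI_NONE 0xffff		/* firmware did not tag the frame */

struct port {
	uint16_t vpi;
	const char *name;
};

/* Route a received frame to the owning virtual port by VPI.  An untagged
 * frame goes to the physical port; an unknown VPI yields NULL and the
 * frame ends up being dropped by the caller.
 */
static struct port *route_by_vpi(struct port *ports, size_t n, uint16_t vpi)
{
	if (vpi == VPI_NONE)
		return &ports[0];	/* ports[0] plays the physical port */
	for (size_t i = 0; i < n; i++)
		if (ports[i].vpi == vpi)
			return &ports[i];
	return NULL;
}

int main(void)
{
	struct port ports[] = {
		{ 0, "physical" }, { 1, "vport1" }, { 2, "vport2" },
	};
	struct port *p;

	p = route_by_vpi(ports, 3, 2);
	printf("%s\n", p ? p->name : "drop");		/* vport2   */
	p = route_by_vpi(ports, 3, VPI_NONE);
	printf("%s\n", p ? p->name : "drop");		/* physical */
	return 0;
}
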
3518 3816
3519void 3817void
3520lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3818lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3521 struct lpfc_iocbq *elsiocb) 3819 struct lpfc_iocbq *elsiocb)
3522{ 3820{
3523 struct lpfc_vport *vport = phba->pport; 3821 struct lpfc_vport *vport = phba->pport;
3524 struct lpfc_dmabuf *mp = NULL;
3525 IOCB_t *icmd = &elsiocb->iocb; 3822 IOCB_t *icmd = &elsiocb->iocb;
3526 struct hbq_dmabuf *sp = NULL;
3527 dma_addr_t paddr; 3823 dma_addr_t paddr;
3824 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
3825 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
3528 3826
3529 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && 3827 elsiocb->context2 = NULL;
3530 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { 3828 elsiocb->context3 = NULL;
3829
3830 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
3831 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
3832 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
3833 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
3531 phba->fc_stat.NoRcvBuf++; 3834 phba->fc_stat.NoRcvBuf++;
3532 /* Not enough posted buffers; Try posting more buffers */ 3835 /* Not enough posted buffers; Try posting more buffers */
3533 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 3836 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
3534 lpfc_sli_hbqbuf_fill_hbq(phba);
3535 else
3536 lpfc_post_buffer(phba, pring, 0, 1); 3837 lpfc_post_buffer(phba, pring, 0, 1);
3537 return; 3838 return;
3538 } 3839 }
3539 3840
3540 /* If there are no BDEs associated with this IOCB, 3841 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3541 * there is nothing to do. 3842 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3542 */ 3843 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3844 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
3845 vport = phba->pport;
3846 else {
3847 uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
3848 vport = lpfc_find_vport_by_vpid(phba, vpi);
3849 }
3850 }
3851 /* If there are no BDEs associated
3852 * with this IOCB, there is nothing to do.
3853 */
3543 if (icmd->ulpBdeCount == 0) 3854 if (icmd->ulpBdeCount == 0)
3544 return; 3855 return;
3545 3856
3546 /* type of ELS cmd is first 32bit word in packet */ 3857 /* type of ELS cmd is first 32bit word
3858 * in packet
3859 */
3547 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 3860 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3548 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 3861 elsiocb->context2 = bdeBuf1;
3549 icmd->un.cont64[0].addrLow);
3550 sp = lpfc_sli_hbqbuf_find(phba, icmd->un.ulpWord[3]);
3551 if (sp)
3552 phba->hbq_buff_count--;
3553 mp = sp ? &sp->dbuf : NULL;
3554 } else { 3862 } else {
3555 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 3863 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
3556 icmd->un.cont64[0].addrLow); 3864 icmd->un.cont64[0].addrLow);
3557 mp = lpfc_sli_ringpostbuf_get(phba, pring, paddr); 3865 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
3866 paddr);
3558 } 3867 }
3559 3868
3560 lpfc_els_unsol_buffer(phba, pring, vport, mp, elsiocb); 3869 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
3561 3870 /*
3871 * The different unsolicited event handlers would tell us
3872 * if they are done with "mp" by setting context2 to NULL.
3873 */
3562 lpfc_nlp_put(elsiocb->context1); 3874 lpfc_nlp_put(elsiocb->context1);
3563 elsiocb->context1 = NULL; 3875 elsiocb->context1 = NULL;
3564 if (elsiocb->context2) { 3876 if (elsiocb->context2) {
3565 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 3877 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
3566 lpfc_sli_free_hbq(phba, sp); 3878 elsiocb->context2 = NULL;
3567 else {
3568 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3569 kfree(mp);
3570 }
3571 } 3879 }
3572 3880
3573 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 3881 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
3574 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) != 0 && 3882 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
3575 icmd->ulpBdeCount == 2) { 3883 icmd->ulpBdeCount == 2) {
3576 sp = lpfc_sli_hbqbuf_find(phba, icmd->un.ulpWord[15]); 3884 elsiocb->context2 = bdeBuf2;
3577 if (sp) 3885 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
3578 phba->hbq_buff_count--;
3579 mp = sp ? &sp->dbuf : NULL;
3580 lpfc_els_unsol_buffer(phba, pring, vport, mp, elsiocb);
3581 /* free mp if we are done with it */ 3886 /* free mp if we are done with it */
3582 if (elsiocb->context2) { 3887 if (elsiocb->context2) {
3583 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 3888 lpfc_in_buf_free(phba, elsiocb->context2);
3584 lpfc_sli_free_hbq(phba, sp); 3889 elsiocb->context2 = NULL;
3585 else { 3890 }
3586 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3891 }
3587 kfree(mp); 3892}
3893
3894void
3895lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
3896{
3897 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
3898
3899 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3900 if (!ndlp) {
3901 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3902 if (!ndlp) {
3903 if (phba->fc_topology == TOPOLOGY_LOOP) {
3904 lpfc_disc_start(vport);
3905 return;
3906 }
3907 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3908 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3909 "%d (%d):0251 NameServer login: no memory\n",
3910 phba->brd_no, vport->vpi);
3911 return;
3912 }
3913 lpfc_nlp_init(vport, ndlp, NameServer_DID);
3914 ndlp->nlp_type |= NLP_FABRIC;
3915 }
3916
3917 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3918
3919 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
3920 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3921 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3922 "%d (%d):0252 Cannot issue NameServer login\n",
3923 phba->brd_no, vport->vpi);
3924 return;
3925 }
3926
3927 if (phba->cfg_fdmi_on) {
3928 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
3929 GFP_KERNEL);
3930 if (ndlp_fdmi) {
3931 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
3932 ndlp_fdmi->nlp_type |= NLP_FABRIC;
3933 ndlp_fdmi->nlp_state =
3934 NLP_STE_PLOGI_ISSUE;
3935 lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
3936 0);
3937 }
3938 }
3939 return;
3940}
3941
3942static void
3943lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3944{
3945 struct lpfc_vport *vport = pmb->vport;
3946 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3947 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3948 MAILBOX_t *mb = &pmb->mb;
3949
3950 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3951 lpfc_nlp_put(ndlp);
3952
3953 if (mb->mbxStatus) {
3954 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
3955 "%d (%d):0915 Register VPI failed: 0x%x\n",
3956 phba->brd_no, vport->vpi, mb->mbxStatus);
3957
3958 switch (mb->mbxStatus) {
3959 case 0x11: /* unsupported feature */
3960 case 0x9603: /* max_vpi exceeded */
3961 /* giving up on vport registration */
3962 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3963 spin_lock_irq(shost->host_lock);
3964 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3965 spin_unlock_irq(shost->host_lock);
3966 lpfc_can_disctmo(vport);
3967 break;
3968 default:
3969 /* Try to recover from this error */
3970 lpfc_mbx_unreg_vpi(vport);
3971 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3972 lpfc_initial_fdisc(vport);
3973 break;
3974 }
3975
3976 } else {
3977 if (vport == phba->pport)
3978 lpfc_issue_fabric_reglogin(vport);
3979 else
3980 lpfc_do_scr_ns_plogi(phba, vport);
3981 }
3982 mempool_free(pmb, phba->mbox_mem_pool);
3983 return;
3984}
3985
3986void
3987lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
3988 struct lpfc_nodelist *ndlp)
3989{
3990 LPFC_MBOXQ_t *mbox;
3991
3992 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3993 if (mbox) {
3994 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
3995 mbox->vport = vport;
3996 mbox->context2 = lpfc_nlp_get(ndlp);
3997 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
3998 if (lpfc_sli_issue_mbox(phba, mbox,
3999 MBX_NOWAIT | MBX_STOP_IOCB)
4000 == MBX_NOT_FINISHED) {
4001 mempool_free(mbox, phba->mbox_mem_pool);
4002 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4003
4004 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4005
4006 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4007 "%d (%d):0253 Register VPI: Cannot send mbox\n",
4008 phba->brd_no, vport->vpi);
4009 }
4010 } else {
4011 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4012
4013 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4014 "%d (%d):0254 Register VPI: no memory\n",
4015 phba->brd_no, vport->vpi);
4016
4017 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4018 lpfc_nlp_put(ndlp);
4019 }
4020}
4021
4022static void
4023lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4024 struct lpfc_iocbq *rspiocb)
4025{
4026 struct lpfc_vport *vport = cmdiocb->vport;
4027 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4028 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4029 struct lpfc_nodelist *np;
4030 struct lpfc_nodelist *next_np;
4031 IOCB_t *irsp = &rspiocb->iocb;
4032 struct lpfc_iocbq *piocb;
4033
4034 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
4035 "%d (%d):0123 FDISC completes. x%x/x%x prevDID: x%x\n",
4036 phba->brd_no, vport->vpi,
4037 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4038
4039 /* Since all FDISCs are being single threaded, we
4040 * must reset the discovery timer for ALL vports
4041 * waiting to send FDISC when one completes.
4042 */
4043 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
4044 lpfc_set_disctmo(piocb->vport);
4045 }
4046
4047 if (irsp->ulpStatus) {
4048 /* Check for retry */
4049 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
4050 goto out;
4051
4052 /* FDISC failed */
4053 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4054 "%d (%d):0124 FDISC failed. (%d/%d)\n",
4055 phba->brd_no, vport->vpi,
4056 irsp->ulpStatus, irsp->un.ulpWord[4]);
4057 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4058 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4059
4060 lpfc_nlp_put(ndlp);
4061 /* giving up on FDISC. Cancel discovery timer */
4062 lpfc_can_disctmo(vport);
4063 } else {
4064 spin_lock_irq(shost->host_lock);
4065 vport->fc_flag |= FC_FABRIC;
4066 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
4067 vport->fc_flag |= FC_PUBLIC_LOOP;
4068 spin_unlock_irq(shost->host_lock);
4069
4070 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
4071 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
4072 if ((vport->fc_prevDID != vport->fc_myDID) &&
4073 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
4074 /* If our NportID changed, we need to ensure all
4075 * remaining NPORTs get unreg_login'ed so we can
4076 * issue unreg_vpi.
4077 */
4078 list_for_each_entry_safe(np, next_np,
4079 &vport->fc_nodes, nlp_listp) {
4080 if (np->nlp_state != NLP_STE_NPR_NODE
4081 || !(np->nlp_flag & NLP_NPR_ADISC))
4082 continue;
4083 spin_lock_irq(shost->host_lock);
4084 np->nlp_flag &= ~NLP_NPR_ADISC;
4085 spin_unlock_irq(shost->host_lock);
4086 lpfc_unreg_rpi(vport, np);
4087 }
4088 lpfc_mbx_unreg_vpi(vport);
4089 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4090 }
4091
4092 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
4093 lpfc_register_new_vport(phba, vport, ndlp);
4094 else
4095 lpfc_do_scr_ns_plogi(phba, vport);
4096
4097 lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */
4098 }
4099
4100out:
4101 lpfc_els_free_iocb(phba, cmdiocb);
4102}
4103
4104int
4105lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4106 uint8_t retry)
4107{
4108 struct lpfc_hba *phba = vport->phba;
4109 IOCB_t *icmd;
4110 struct lpfc_iocbq *elsiocb;
4111 struct serv_parm *sp;
4112 uint8_t *pcmd;
4113 uint16_t cmdsize;
4114 int did = ndlp->nlp_DID;
4115 int rc;
4116 int new_ndlp = 0;
4117
4118 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
4119 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
4120 ELS_CMD_FDISC);
4121 if (!elsiocb) {
4122 if (new_ndlp)
4123 mempool_free(ndlp, phba->nlp_mem_pool);
4124 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4125
4126 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4127 "%d (%d):0255 Issue FDISC: no IOCB\n",
4128 phba->brd_no, vport->vpi);
4129 return 1;
4130 }
4131
4132 icmd = &elsiocb->iocb;
4133 icmd->un.elsreq64.myID = 0;
4134 icmd->un.elsreq64.fl = 1;
4135
4136 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
4137 icmd->ulpCt_h = 1;
4138 icmd->ulpCt_l = 0;
4139
4140 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4141 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
4142 pcmd += sizeof(uint32_t); /* CSP Word 1 */
4143 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
4144 sp = (struct serv_parm *) pcmd;
4145 /* Setup CSPs accordingly for Fabric */
4146 sp->cmn.e_d_tov = 0;
4147 sp->cmn.w2.r_a_tov = 0;
4148 sp->cls1.classValid = 0;
4149 sp->cls2.seqDelivery = 1;
4150 sp->cls3.seqDelivery = 1;
4151
4152 pcmd += sizeof(uint32_t); /* CSP Word 2 */
4153 pcmd += sizeof(uint32_t); /* CSP Word 3 */
4154 pcmd += sizeof(uint32_t); /* CSP Word 4 */
4155 pcmd += sizeof(uint32_t); /* Port Name */
4156 memcpy(pcmd, &vport->fc_portname, 8);
4157 pcmd += sizeof(uint32_t); /* Node Name */
4158 pcmd += sizeof(uint32_t); /* Node Name */
4159 memcpy(pcmd, &vport->fc_nodename, 8);
4160
4161 lpfc_set_disctmo(vport);
4162
4163 phba->fc_stat.elsXmitFDISC++;
4164 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
4165
4166 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
4167 if (rc == IOCB_ERROR) {
4168 lpfc_els_free_iocb(phba, elsiocb);
4169 if (new_ndlp)
4170 mempool_free(ndlp, phba->nlp_mem_pool);
4171 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4172
4173 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4174 "%d (%d):0256 Issue FDISC: Cannot send IOCB\n",
4175 phba->brd_no, vport->vpi);
4176
4177 return 1;
4178 }
4179 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
4180 vport->port_state = LPFC_FDISC;
4181 return 0;
4182}
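
lpfc_issue_els_fdisc() builds the request by placing the command word, copying the physical port's service parameters, zeroing the timeout values the fabric will assign, and then overwriting the port and node name fields with the vport's own WWPN/WWNN. A rough sketch of assembling such a payload into a flat buffer; the struct layout and the command word value here are illustrative, not the FC wire format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct csp {			/* stand-in for the common service params */
	uint32_t e_d_tov;
	uint32_t r_a_tov;
	uint32_t class_flags;
};

struct serv_params {		/* stand-in for struct serv_parm */
	struct csp cmn;
	uint8_t port_name[8];
	uint8_t node_name[8];
};

/* Build: [ command word | service params patched for a fabric login ] */
static size_t build_fdisc(uint8_t *buf, uint32_t cmd,
			  const struct serv_params *physical,
			  const uint8_t wwpn[8], const uint8_t wwnn[8])
{
	struct serv_params sp = *physical;	/* start from the physical port  */

	sp.cmn.e_d_tov = 0;			/* let the fabric assign timeouts */
	sp.cmn.r_a_tov = 0;
	memcpy(sp.port_name, wwpn, 8);		/* the vport's own names */
	memcpy(sp.node_name, wwnn, 8);

	memcpy(buf, &cmd, sizeof(cmd));
	memcpy(buf + sizeof(cmd), &sp, sizeof(sp));
	return sizeof(cmd) + sizeof(sp);
}

int main(void)
{
	uint8_t buf[64];
	struct serv_params phys = { { 100, 200, 0 }, { 0 }, { 0 } };
	uint8_t wwpn[8] = { 0x10, 0, 0, 0, 0xc9, 0, 0, 1 };
	uint8_t wwnn[8] = { 0x20, 0, 0, 0, 0xc9, 0, 0, 1 };

	printf("payload bytes: %zu\n",
	       build_fdisc(buf, 0x51, &phys, wwpn, wwnn));
	return 0;
}
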
4183
4184static void
4185lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4186 struct lpfc_iocbq *rspiocb)
4187{
4188 struct lpfc_vport *vport = cmdiocb->vport;
4189
4190 lpfc_els_free_iocb(phba, cmdiocb);
4191 vport->unreg_vpi_cmpl = VPORT_ERROR;
4192}
4193
4194int
4195lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4196{
4197 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4198 struct lpfc_hba *phba = vport->phba;
4199 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4200 IOCB_t *icmd;
4201 struct lpfc_iocbq *elsiocb;
4202 uint8_t *pcmd;
4203 uint16_t cmdsize;
4204
4205 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
4206 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
4207 ELS_CMD_LOGO);
4208 if (!elsiocb)
4209 return 1;
4210
4211 icmd = &elsiocb->iocb;
4212 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4213 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
4214 pcmd += sizeof(uint32_t);
4215
4216 /* Fill in LOGO payload */
4217 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
4218 pcmd += sizeof(uint32_t);
4219 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
4220
4221 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
4222 spin_lock_irq(shost->host_lock);
4223 ndlp->nlp_flag |= NLP_LOGO_SND;
4224 spin_unlock_irq(shost->host_lock);
4225 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
4226 spin_lock_irq(shost->host_lock);
4227 ndlp->nlp_flag &= ~NLP_LOGO_SND;
4228 spin_unlock_irq(shost->host_lock);
4229 lpfc_els_free_iocb(phba, elsiocb);
4230 return 1;
4231 }
4232 return 0;
4233}
4234
4235void
4236lpfc_fabric_block_timeout(unsigned long ptr)
4237{
4238 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4239 unsigned long iflags;
4240 uint32_t tmo_posted;
4241 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
4242 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
4243 if (!tmo_posted)
4244 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
4245 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
4246
4247 if (!tmo_posted) {
4248 spin_lock_irqsave(&phba->hbalock, iflags);
4249 if (phba->work_wait)
4250 lpfc_worker_wake_up(phba);
4251 spin_unlock_irqrestore(&phba->hbalock, iflags);
4252 }
4253}
4254
4255static void
4256lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4257{
4258 struct lpfc_iocbq *iocb;
4259 unsigned long iflags;
4260 int ret;
4261 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4262 IOCB_t *cmd;
4263
4264repeat:
4265 iocb = NULL;
4266 spin_lock_irqsave(&phba->hbalock, iflags);
4267 /* Post any pending iocb to the SLI layer */
4268 if (atomic_read(&phba->fabric_iocb_count) == 0) {
4269 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
4270 list);
4271 if (iocb)
4272 atomic_inc(&phba->fabric_iocb_count);
4273 }
4274 spin_unlock_irqrestore(&phba->hbalock, iflags);
4275 if (iocb) {
4276 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4277 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4278 iocb->iocb_flag |= LPFC_IO_FABRIC;
4279
4280 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4281
4282 if (ret == IOCB_ERROR) {
4283 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4284 iocb->fabric_iocb_cmpl = NULL;
4285 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4286 cmd = &iocb->iocb;
4287 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4288 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4289 iocb->iocb_cmpl(phba, iocb, iocb);
4290
4291 atomic_dec(&phba->fabric_iocb_count);
4292 goto repeat;
4293 }
4294 }
4295
4296 return;
4297}
4298
4299void
4300lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4301{
4302 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4303
4304 lpfc_resume_fabric_iocbs(phba);
4305 return;
4306}
4307
4308static void
4309lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4310{
4311 int blocked;
4312
4313 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4314 /* Start a timer to unblock fabric
4315 * iocbs after 100ms
4316 */
4317 if (!blocked)
4318 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
4319
4320 return;
4321}
4322
4323static void
4324lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4325 struct lpfc_iocbq *rspiocb)
4326{
4327 struct ls_rjt stat;
4328
4329 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
4330 BUG();
4331
4332 switch (rspiocb->iocb.ulpStatus) {
4333 case IOSTAT_NPORT_RJT:
4334 case IOSTAT_FABRIC_RJT:
4335 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
4336 lpfc_block_fabric_iocbs(phba);
3588 } 4337 }
4338 break;
4339
4340 case IOSTAT_NPORT_BSY:
4341 case IOSTAT_FABRIC_BSY:
4342 lpfc_block_fabric_iocbs(phba);
4343 break;
4344
4345 case IOSTAT_LS_RJT:
4346 stat.un.lsRjtError =
4347 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
4348 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
4349 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
4350 lpfc_block_fabric_iocbs(phba);
4351 break;
4352 }
4353
4354 if (atomic_read(&phba->fabric_iocb_count) == 0)
4355 BUG();
4356
4357 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
4358 cmdiocb->fabric_iocb_cmpl = NULL;
4359 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
4360 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
4361
4362 atomic_dec(&phba->fabric_iocb_count);
4363 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
4364 /* Post any pending iocbs to HBA */
4365 lpfc_resume_fabric_iocbs(phba);
4366 }
4367}
4368
4369int
4370lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4371{
4372 unsigned long iflags;
4373 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4374 int ready;
4375 int ret;
4376
4377 if (atomic_read(&phba->fabric_iocb_count) > 1)
4378 BUG();
4379
4380 spin_lock_irqsave(&phba->hbalock, iflags);
4381 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
4382 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4383
4384 spin_unlock_irqrestore(&phba->hbalock, iflags);
4385 if (ready) {
4386 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4387 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4388 iocb->iocb_flag |= LPFC_IO_FABRIC;
4389
4390 atomic_inc(&phba->fabric_iocb_count);
4391 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4392
4393 if (ret == IOCB_ERROR) {
4394 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4395 iocb->fabric_iocb_cmpl = NULL;
4396 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4397 atomic_dec(&phba->fabric_iocb_count);
4398 }
4399 } else {
4400 spin_lock_irqsave(&phba->hbalock, iflags);
4401 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
4402 spin_unlock_irqrestore(&phba->hbalock, iflags);
4403 ret = IOCB_SUCCESS;
4404 }
4405 return ret;
4406}
4407
4408
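
The fabric IOCB path introduced here single-threads fabric logins: at most one fabric command is outstanding (fabric_iocb_count is 0 or 1), later ones are parked on fabric_iocb_list, a busy or retryable-reject completion sets FABRIC_COMANDS_BLOCKED and arms a timer that clears it after roughly 100 ms (jiffies + HZ/10), and the resume path then issues the next queued command. A simplified single-threaded sketch of that gate, with illustrative names and no locking or real timers:

#include <stdio.h>

#define QLEN 8

struct gate {
	int in_flight;		/* 0 or 1: one fabric command at a time        */
	int blocked;		/* set after BSY/RJT, cleared by a ~100 ms timer */
	int queue[QLEN];	/* pending command ids                         */
	int head, tail, count;
};

static void resume(struct gate *g)
{
	if (g->in_flight || g->blocked || !g->count)
		return;
	g->in_flight = 1;
	printf("issue queued cmd %d to hardware\n", g->queue[g->head]);
	g->head = (g->head + 1) % QLEN;
	g->count--;
}

static void submit(struct gate *g, int cmd)
{
	if (!g->in_flight && !g->blocked) {
		g->in_flight = 1;
		printf("issue cmd %d to hardware\n", cmd);
		return;
	}
	if (g->count < QLEN) {		/* park it until the gate opens */
		g->queue[g->tail] = cmd;
		g->tail = (g->tail + 1) % QLEN;
		g->count++;
	}
}

/* Completion: 'busy' models NPort/Fabric BSY or an LS_RJT asking us to
 * retry later; the driver would arm a timer that calls unblock() ~100 ms on.
 */
static void complete(struct gate *g, int busy)
{
	g->in_flight = 0;
	if (busy)
		g->blocked = 1;
	else
		resume(g);
}

static void unblock(struct gate *g)	/* timer callback */
{
	g->blocked = 0;
	resume(g);
}

int main(void)
{
	struct gate g = { 0 };

	submit(&g, 1);		/* issued immediately          */
	submit(&g, 2);		/* queued behind cmd 1         */
	complete(&g, 1);	/* fabric busy: block the gate */
	unblock(&g);		/* timer fires: cmd 2 goes out */
	complete(&g, 0);
	return 0;
}
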
4409void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
4410{
4411 LIST_HEAD(completions);
4412 struct lpfc_hba *phba = vport->phba;
4413 struct lpfc_iocbq *tmp_iocb, *piocb;
4414 IOCB_t *cmd;
4415
4416 spin_lock_irq(&phba->hbalock);
4417 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4418 list) {
4419
4420 if (piocb->vport != vport)
4421 continue;
4422
4423 list_move_tail(&piocb->list, &completions);
4424 }
4425 spin_unlock_irq(&phba->hbalock);
4426
4427 while (!list_empty(&completions)) {
4428 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4429 list_del_init(&piocb->list);
4430
4431 cmd = &piocb->iocb;
4432 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4433 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4434 (piocb->iocb_cmpl) (phba, piocb, piocb);
4435 }
4436}
4437
4438void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
4439{
4440 LIST_HEAD(completions);
4441 struct lpfc_hba *phba = ndlp->vport->phba;
4442 struct lpfc_iocbq *tmp_iocb, *piocb;
4443 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4444 IOCB_t *cmd;
4445
4446 spin_lock_irq(&phba->hbalock);
4447 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4448 list) {
4449 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
4450
4451 list_move_tail(&piocb->list, &completions);
3589 } 4452 }
3590 } 4453 }
4454 spin_unlock_irq(&phba->hbalock);
4455
4456 while (!list_empty(&completions)) {
4457 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4458 list_del_init(&piocb->list);
4459
4460 cmd = &piocb->iocb;
4461 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4462 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4463 (piocb->iocb_cmpl) (phba, piocb, piocb);
4464 }
3591} 4465}
4466
4467void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
4468{
4469 LIST_HEAD(completions);
4470 struct lpfc_iocbq *piocb;
4471 IOCB_t *cmd;
4472
4473 spin_lock_irq(&phba->hbalock);
4474 list_splice_init(&phba->fabric_iocb_list, &completions);
4475 spin_unlock_irq(&phba->hbalock);
4476
4477 while (!list_empty(&completions)) {
4478 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4479 list_del_init(&piocb->list);
4480
4481 cmd = &piocb->iocb;
4482 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4483 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4484 (piocb->iocb_cmpl) (phba, piocb, piocb);
4485 }
4486}
4487
4488
4489void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
4490{
4491 LIST_HEAD(completions);
4492 struct lpfc_iocbq *tmp_iocb, *piocb;
4493 IOCB_t *cmd;
4494 struct lpfc_nodelist *ndlp;
4495
4496 spin_lock_irq(&phba->hbalock);
4497 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4498 list) {
4499
4500 cmd = &piocb->iocb;
4501 ndlp = (struct lpfc_nodelist *) piocb->context1;
4502 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
4503 ndlp != NULL &&
4504 ndlp->nlp_DID == Fabric_DID)
4505 list_move_tail(&piocb->list, &completions);
4506 }
4507 spin_unlock_irq(&phba->hbalock);
4508
4509 while (!list_empty(&completions)) {
4510 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4511 list_del_init(&piocb->list);
4512
4513 cmd = &piocb->iocb;
4514 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4515 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4516 (piocb->iocb_cmpl) (phba, piocb, piocb);
4517 }
4518}
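
All of the lpfc_fabric_abort_*() helpers above share one shape: walk the pending list under the lock, move matching entries onto a private completions list, drop the lock, and only then call each entry's completion with a local-reject/aborted status, so completion handlers never run with hbalock held. A small pthread sketch of that splice-then-complete pattern, with made-up request types:

#include <pthread.h>
#include <stdio.h>

struct req {
	struct req *next;
	int owner;			/* which port queued it */
	void (*done)(struct req *, int aborted);
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *pending;		/* singly linked pending list */

/* Abort every request belonging to 'owner'.  Matching entries are unlinked
 * while the lock is held and completed only after it is released.
 */
static void abort_owner(int owner)
{
	struct req *completions = NULL, **pp, *r;

	pthread_mutex_lock(&list_lock);
	pp = &pending;
	while ((r = *pp) != NULL) {
		if (r->owner == owner) {
			*pp = r->next;		/* unlink from pending      */
			r->next = completions;	/* push onto private list   */
			completions = r;
		} else {
			pp = &r->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	while ((r = completions) != NULL) {	/* lock no longer held */
		completions = r->next;
		r->done(r, 1 /* aborted */);
	}
}

static void print_done(struct req *r, int aborted)
{
	printf("req for owner %d completed, aborted=%d\n", r->owner, aborted);
}

int main(void)
{
	struct req a = { .owner = 1, .done = print_done };
	struct req b = { .owner = 2, .done = print_done };

	a.next = &b;
	pending = &a;
	abort_owner(1);			/* completes only owner 1's request */
	return 0;
}
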
4519
4520
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 20b2a4905daa..94ee9675b5b0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,6 +36,7 @@
36#include "lpfc.h" 36#include "lpfc.h"
37#include "lpfc_logmsg.h" 37#include "lpfc_logmsg.h"
38#include "lpfc_crtn.h" 38#include "lpfc_crtn.h"
39#include "lpfc_vport.h"
39 40
40/* AlpaArray for assignment of scsid for scan-down and bind_method */ 41/* AlpaArray for assignment of scsid for scan-down and bind_method */
41static uint8_t lpfcAlpaArray[] = { 42static uint8_t lpfcAlpaArray[] = {
@@ -96,50 +97,68 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
96 int warn_on = 0; 97 int warn_on = 0;
97 struct lpfc_hba *phba; 98 struct lpfc_hba *phba;
98 struct lpfc_vport *vport; 99 struct lpfc_vport *vport;
100 int put_node;
101 int put_rport;
99 102
100 rdata = rport->dd_data; 103 rdata = rport->dd_data;
101 ndlp = rdata->pnode; 104 ndlp = rdata->pnode;
102 105
103 if (!ndlp) { 106 if (!ndlp) {
104 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 107 if (rport->scsi_target_id != -1) {
105 printk(KERN_ERR "Cannot find remote node" 108 printk(KERN_ERR "Cannot find remote node"
106 " for rport in dev_loss_tmo_callbk x%x\n", 109 " for rport in dev_loss_tmo_callbk x%x\n",
107 rport->port_id); 110 rport->port_id);
111 }
108 return; 112 return;
109 } 113 }
110 114
111 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 115 if (ndlp->nlp_type & NLP_FABRIC) {
116 /* We will clean up these Nodes in linkup */
117 put_node = rdata->pnode != NULL;
118 put_rport = ndlp->rport != NULL;
119 rdata->pnode = NULL;
120 ndlp->rport = NULL;
121 if (put_node)
122 lpfc_nlp_put(ndlp);
123 if (put_rport)
124 put_device(&rport->dev);
112 return; 125 return;
126 }
113 127
114 name = (uint8_t *)&ndlp->nlp_portname; 128 name = (uint8_t *)&ndlp->nlp_portname;
115 vport = ndlp->vport; 129 vport = ndlp->vport;
116 phba = vport->phba; 130 phba = vport->phba;
117 131
132 if (!(vport->load_flag & FC_UNLOADING) &&
133 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
134 return;
135
136
118 if (ndlp->nlp_sid != NLP_NO_SID) { 137 if (ndlp->nlp_sid != NLP_NO_SID) {
119 warn_on = 1; 138 warn_on = 1;
120 /* flush the target */ 139 /* flush the target */
121 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 140 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
122 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 141 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
123 } 142 }
124 if (vport->load_flag & FC_UNLOADING) 143 if (vport->load_flag & FC_UNLOADING)
125 warn_on = 0; 144 warn_on = 0;
126 145
127 if (warn_on) { 146 if (warn_on) {
128 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 147 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
129 "%d:0203 Devloss timeout on " 148 "%d (%d):0203 Devloss timeout on "
130 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 149 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
131 "NPort x%x Data: x%x x%x x%x\n", 150 "NPort x%x Data: x%x x%x x%x\n",
132 phba->brd_no, 151 phba->brd_no, vport->vpi,
133 *name, *(name+1), *(name+2), *(name+3), 152 *name, *(name+1), *(name+2), *(name+3),
134 *(name+4), *(name+5), *(name+6), *(name+7), 153 *(name+4), *(name+5), *(name+6), *(name+7),
135 ndlp->nlp_DID, ndlp->nlp_flag, 154 ndlp->nlp_DID, ndlp->nlp_flag,
136 ndlp->nlp_state, ndlp->nlp_rpi); 155 ndlp->nlp_state, ndlp->nlp_rpi);
137 } else { 156 } else {
138 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 157 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
139 "%d:0204 Devloss timeout on " 158 "%d (%d):0204 Devloss timeout on "
140 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 159 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
141 "NPort x%x Data: x%x x%x x%x\n", 160 "NPort x%x Data: x%x x%x x%x\n",
142 phba->brd_no, 161 phba->brd_no, vport->vpi,
143 *name, *(name+1), *(name+2), *(name+3), 162 *name, *(name+1), *(name+2), *(name+3),
144 *(name+4), *(name+5), *(name+6), *(name+7), 163 *(name+4), *(name+5), *(name+6), *(name+7),
145 ndlp->nlp_DID, ndlp->nlp_flag, 164 ndlp->nlp_DID, ndlp->nlp_flag,
@@ -152,12 +171,23 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
152 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 171 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
153 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 172 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
154 else { 173 else {
174 put_node = rdata->pnode != NULL;
175 put_rport = ndlp->rport != NULL;
155 rdata->pnode = NULL; 176 rdata->pnode = NULL;
156 ndlp->rport = NULL; 177 ndlp->rport = NULL;
157 lpfc_nlp_put(ndlp); 178 if (put_node)
158 put_device(&rport->dev); 179 lpfc_nlp_put(ndlp);
180 if (put_rport)
181 put_device(&rport->dev);
159 } 182 }
183 return;
184}
160 185
186
187void
188lpfc_worker_wake_up(struct lpfc_hba *phba)
189{
190 wake_up(phba->work_wait);
161 return; 191 return;
162} 192}
163 193
@@ -166,6 +196,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
166{ 196{
167 struct lpfc_work_evt *evtp = NULL; 197 struct lpfc_work_evt *evtp = NULL;
168 struct lpfc_nodelist *ndlp; 198 struct lpfc_nodelist *ndlp;
199 struct lpfc_vport *vport;
169 int free_evt; 200 int free_evt;
170 201
171 spin_lock_irq(&phba->hbalock); 202 spin_lock_irq(&phba->hbalock);
@@ -175,10 +206,23 @@ lpfc_work_list_done(struct lpfc_hba *phba)
175 spin_unlock_irq(&phba->hbalock); 206 spin_unlock_irq(&phba->hbalock);
176 free_evt = 1; 207 free_evt = 1;
177 switch (evtp->evt) { 208 switch (evtp->evt) {
209 case LPFC_EVT_DEV_LOSS:
210 free_evt = 0; /* evt is part of ndlp */
211 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
212 vport = ndlp->vport;
213 if (!vport)
214 break;
215 if (!(vport->load_flag & FC_UNLOADING) &&
216 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
217 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
218 lpfc_disc_state_machine(vport, ndlp, NULL,
219 NLP_EVT_DEVICE_RM);
220 }
221 break;
178 case LPFC_EVT_ELS_RETRY: 222 case LPFC_EVT_ELS_RETRY:
179 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); 223 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
180 lpfc_els_retry_delay_handler(ndlp); 224 lpfc_els_retry_delay_handler(ndlp);
181 free_evt = 0; 225 free_evt = 0; /* evt is part of ndlp */
182 break; 226 break;
183 case LPFC_EVT_ONLINE: 227 case LPFC_EVT_ONLINE:
184 if (phba->link_state < LPFC_LINK_DOWN) 228 if (phba->link_state < LPFC_LINK_DOWN)
@@ -250,24 +294,43 @@ lpfc_work_done(struct lpfc_hba *phba)
250 if (ha_copy & HA_LATT) 294 if (ha_copy & HA_LATT)
251 lpfc_handle_latt(phba); 295 lpfc_handle_latt(phba);
252 296
253 vport = phba->pport; 297 spin_lock_irq(&phba->hbalock);
298 list_for_each_entry(vport, &phba->port_list, listentry) {
299 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
254 300
255 work_port_events = vport->work_port_events; 301 if (!scsi_host_get(shost)) {
302 continue;
303 }
304 spin_unlock_irq(&phba->hbalock);
305 work_port_events = vport->work_port_events;
256 306
257 if (work_port_events & WORKER_DISC_TMO) 307 if (work_port_events & WORKER_DISC_TMO)
258 lpfc_disc_timeout_handler(vport); 308 lpfc_disc_timeout_handler(vport);
259 309
260 if (work_port_events & WORKER_ELS_TMO) 310 if (work_port_events & WORKER_ELS_TMO)
261 lpfc_els_timeout_handler(vport); 311 lpfc_els_timeout_handler(vport);
262 312
263 if (work_port_events & WORKER_MBOX_TMO) 313 if (work_port_events & WORKER_MBOX_TMO)
264 lpfc_mbox_timeout_handler(phba); 314 lpfc_mbox_timeout_handler(phba);
265 315
266 if (work_port_events & WORKER_FDMI_TMO) 316 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
267 lpfc_fdmi_timeout_handler(vport); 317 lpfc_unblock_fabric_iocbs(phba);
268 318
269 spin_lock_irq(&phba->hbalock); 319 if (work_port_events & WORKER_FDMI_TMO)
270 vport->work_port_events &= ~work_port_events; 320 lpfc_fdmi_timeout_handler(vport);
321
322 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
323 lpfc_ramp_down_queue_handler(phba);
324
325 if (work_port_events & WORKER_RAMP_UP_QUEUE)
326 lpfc_ramp_up_queue_handler(phba);
327
328 spin_lock_irq(&vport->work_port_lock);
329 vport->work_port_events &= ~work_port_events;
330 spin_unlock_irq(&vport->work_port_lock);
331 scsi_host_put(shost);
332 spin_lock_irq(&phba->hbalock);
333 }
271 spin_unlock_irq(&phba->hbalock); 334 spin_unlock_irq(&phba->hbalock);
272 335
273 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) { 336 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
@@ -300,24 +363,41 @@ lpfc_work_done(struct lpfc_hba *phba)
300static int 363static int
301check_work_wait_done(struct lpfc_hba *phba) 364check_work_wait_done(struct lpfc_hba *phba)
302{ 365{
303 struct lpfc_vport *vport = phba->pport; 366 struct lpfc_vport *vport;
304 int rc = 0; 367 struct lpfc_sli_ring *pring;
305 368 int i, rc = 0;
306 if (!vport)
307 return 0;
308 369
309 spin_lock_irq(&phba->hbalock); 370 spin_lock_irq(&phba->hbalock);
371 list_for_each_entry(vport, &phba->port_list, listentry) {
372 if (vport->work_port_events) {
373 rc = 1;
374 goto exit;
375 }
376 }
310 377
311 if (phba->work_ha || 378 if (phba->work_ha || (!list_empty(&phba->work_list)) ||
312 vport->work_port_events || 379 kthread_should_stop()) {
313 (!list_empty(&phba->work_list)) ||
314 kthread_should_stop())
315 rc = 1; 380 rc = 1;
381 goto exit;
382 }
383 for (i = 0; i < phba->sli.num_rings; i++) {
384 pring = &phba->sli.ring[i];
385 if (pring->flag & LPFC_DEFERRED_RING_EVENT) {
386 rc = 1;
387 goto exit;
388 }
389 }
390exit:
391 if (rc)
392 phba->work_found++;
393 else
394 phba->work_found = 0;
316 395
317 spin_unlock_irq(&phba->hbalock); 396 spin_unlock_irq(&phba->hbalock);
318 return rc; 397 return rc;
319} 398}
320 399
400
321int 401int
322lpfc_do_work(void *p) 402lpfc_do_work(void *p)
323{ 403{
@@ -327,11 +407,13 @@ lpfc_do_work(void *p)
327 407
328 set_user_nice(current, -20); 408 set_user_nice(current, -20);
329 phba->work_wait = &work_waitq; 409 phba->work_wait = &work_waitq;
410 phba->work_found = 0;
330 411
331 while (1) { 412 while (1) {
332 413
333 rc = wait_event_interruptible(work_waitq, 414 rc = wait_event_interruptible(work_waitq,
334 check_work_wait_done(phba)); 415 check_work_wait_done(phba));
416
335 BUG_ON(rc); 417 BUG_ON(rc);
336 418
337 if (kthread_should_stop()) 419 if (kthread_should_stop())
@@ -339,6 +421,17 @@ lpfc_do_work(void *p)
339 421
340 lpfc_work_done(phba); 422 lpfc_work_done(phba);
341 423
424 /* If there is a lot of slow ring work, like during link up,
425 * check_work_wait_done() may cause this thread to not give
426 * up the CPU for very long periods of time. This may cause
427 * soft lockups or other problems. To avoid these situations,
428 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
429 * consecutive iterations.
430 */
431 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
432 phba->work_found = 0;
433 schedule();
434 }
342 } 435 }
343 phba->work_wait = NULL; 436 phba->work_wait = NULL;
344 return 0; 437 return 0;
@@ -360,7 +453,7 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
360 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will 453 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
361 * be queued to worker thread for processing 454 * be queued to worker thread for processing
362 */ 455 */
363 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL); 456 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
364 if (!evtp) 457 if (!evtp)
365 return 0; 458 return 0;
366 459
@@ -371,37 +464,94 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
371 spin_lock_irqsave(&phba->hbalock, flags); 464 spin_lock_irqsave(&phba->hbalock, flags);
372 list_add_tail(&evtp->evt_listp, &phba->work_list); 465 list_add_tail(&evtp->evt_listp, &phba->work_list);
373 if (phba->work_wait) 466 if (phba->work_wait)
374 wake_up(phba->work_wait); 467 lpfc_worker_wake_up(phba);
375 spin_unlock_irqrestore(&phba->hbalock, flags); 468 spin_unlock_irqrestore(&phba->hbalock, flags);
376 469
377 return 1; 470 return 1;
378} 471}
379 472
473void
474lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
475{
476 struct lpfc_hba *phba = vport->phba;
477 struct lpfc_nodelist *ndlp, *next_ndlp;
478 int rc;
479
480 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
481 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
482 continue;
483
484 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
485 lpfc_unreg_rpi(vport, ndlp);
486
487 /* Leave Fabric nodes alone on link down */
488 if (!remove && ndlp->nlp_type & NLP_FABRIC)
489 continue;
490 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
491 remove
492 ? NLP_EVT_DEVICE_RM
493 : NLP_EVT_DEVICE_RECOVERY);
494 }
495 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
496 lpfc_mbx_unreg_vpi(vport);
497 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
498 }
499}
500
501static void
502lpfc_linkdown_port(struct lpfc_vport *vport)
503{
504 struct lpfc_nodelist *ndlp, *next_ndlp;
505 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
506
507 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
508
509 /* Cleanup any outstanding RSCN activity */
510 lpfc_els_flush_rscn(vport);
511
512 /* Cleanup any outstanding ELS commands */
513 lpfc_els_flush_cmd(vport);
514
515 lpfc_cleanup_rpis(vport, 0);
516
517 /* free any ndlp's on unused list */
518 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
519 /* free any ndlp's in unused state */
520 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
521 lpfc_drop_node(vport, ndlp);
522
523 /* Turn off discovery timer if it's running */
524 lpfc_can_disctmo(vport);
525}
526
380int 527int
381lpfc_linkdown(struct lpfc_hba *phba) 528lpfc_linkdown(struct lpfc_hba *phba)
382{ 529{
383 struct lpfc_vport *vport = phba->pport; 530 struct lpfc_vport *vport = phba->pport;
384 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 531 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
385 struct lpfc_sli *psli; 532 struct lpfc_vport *port_iterator;
386 struct lpfc_nodelist *ndlp, *next_ndlp;
387 LPFC_MBOXQ_t *mb; 533 LPFC_MBOXQ_t *mb;
388 int rc;
389 534
390 psli = &phba->sli;
391 if (phba->link_state == LPFC_LINK_DOWN) { 535 if (phba->link_state == LPFC_LINK_DOWN) {
392 return 0; 536 return 0;
393 } 537 }
394 spin_lock_irq(&phba->hbalock); 538 spin_lock_irq(&phba->hbalock);
395 if (phba->link_state > LPFC_LINK_DOWN) 539 if (phba->link_state > LPFC_LINK_DOWN) {
396 phba->link_state = LPFC_LINK_DOWN; 540 phba->link_state = LPFC_LINK_DOWN;
541 phba->pport->fc_flag &= ~FC_LBIT;
542 }
397 spin_unlock_irq(&phba->hbalock); 543 spin_unlock_irq(&phba->hbalock);
398 544
399 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); 545 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
546
547 /* Issue a LINK DOWN event to all nodes */
548 lpfc_linkdown_port(port_iterator);
549 }
400 550
401 /* Clean up any firmware default rpi's */ 551 /* Clean up any firmware default rpi's */
402 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 552 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
403 if (mb) { 553 if (mb) {
404 lpfc_unreg_did(phba, 0xffffffff, mb); 554 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
405 mb->vport = vport; 555 mb->vport = vport;
406 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 556 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
407 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) 557 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
@@ -410,31 +560,13 @@ lpfc_linkdown(struct lpfc_hba *phba)
410 } 560 }
411 } 561 }
412 562
413 /* Cleanup any outstanding RSCN activity */
414 lpfc_els_flush_rscn(vport);
415
416 /* Cleanup any outstanding ELS commands */
417 lpfc_els_flush_cmd(vport);
418
419 /*
420 * Issue a LINK DOWN event to all nodes.
421 */
422 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
423 /* free any ndlp's on unused state */
424 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
425 lpfc_drop_node(vport, ndlp);
426 else /* otherwise, force node recovery. */
427 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
428 NLP_EVT_DEVICE_RECOVERY);
429 }
430
431 /* Setup myDID for link up if we are in pt2pt mode */ 563 /* Setup myDID for link up if we are in pt2pt mode */
432 if (vport->fc_flag & FC_PT2PT) { 564 if (phba->pport->fc_flag & FC_PT2PT) {
433 vport->fc_myDID = 0; 565 phba->pport->fc_myDID = 0;
434 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 566 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
435 if (mb) { 567 if (mb) {
436 lpfc_config_link(phba, mb); 568 lpfc_config_link(phba, mb);
437 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 569 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
438 mb->vport = vport; 570 mb->vport = vport;
439 if (lpfc_sli_issue_mbox(phba, mb, 571 if (lpfc_sli_issue_mbox(phba, mb,
440 (MBX_NOWAIT | MBX_STOP_IOCB)) 572 (MBX_NOWAIT | MBX_STOP_IOCB))
@@ -443,66 +575,88 @@ lpfc_linkdown(struct lpfc_hba *phba)
443 } 575 }
444 } 576 }
445 spin_lock_irq(shost->host_lock); 577 spin_lock_irq(shost->host_lock);
446 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); 578 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
447 spin_unlock_irq(shost->host_lock); 579 spin_unlock_irq(shost->host_lock);
448 } 580 }
449 581
450 spin_lock_irq(shost->host_lock); 582 return 0;
451 vport->fc_flag &= ~FC_LBIT; 583}
452 spin_unlock_irq(shost->host_lock);
453 584
454 /* Turn off discovery timer if its running */ 585static void
455 lpfc_can_disctmo(vport); 586lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
587{
588 struct lpfc_nodelist *ndlp;
456 589
457 /* Must process IOCBs on all rings to handle ABORTed I/Os */ 590 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
458 return 0; 591 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
592 continue;
593
594 if (ndlp->nlp_type & NLP_FABRIC) {
595 /* On Linkup its safe to clean up the ndlp
596 * from Fabric connections.
597 */
598 if (ndlp->nlp_DID != Fabric_DID)
599 lpfc_unreg_rpi(vport, ndlp);
600 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
601 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
602 /* Fail outstanding IO now since device is
603 * marked for PLOGI.
604 */
605 lpfc_unreg_rpi(vport, ndlp);
606 }
607 }
459} 608}
460 609
461static int 610static void
462lpfc_linkup(struct lpfc_hba *phba) 611lpfc_linkup_port(struct lpfc_vport *vport)
463{ 612{
464 struct lpfc_vport *vport = phba->pport; 613 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
465 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
466 struct lpfc_nodelist *ndlp, *next_ndlp; 614 struct lpfc_nodelist *ndlp, *next_ndlp;
615 struct lpfc_hba *phba = vport->phba;
616
617 if ((vport->load_flag & FC_UNLOADING) != 0)
618 return;
619
620 /* If NPIV is not enabled, only bring the physical port up */
621 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
622 (vport != phba->pport))
623 return;
467 624
468 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); 625 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
469 626
470 spin_lock_irq(shost->host_lock); 627 spin_lock_irq(shost->host_lock);
471 phba->link_state = LPFC_LINK_UP;
472 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | 628 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
473 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); 629 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
474 vport->fc_flag |= FC_NDISC_ACTIVE; 630 vport->fc_flag |= FC_NDISC_ACTIVE;
475 vport->fc_ns_retry = 0; 631 vport->fc_ns_retry = 0;
476 spin_unlock_irq(shost->host_lock); 632 spin_unlock_irq(shost->host_lock);
477 633
634 if (vport->fc_flag & FC_LBIT)
635 lpfc_linkup_cleanup_nodes(vport);
478 636
479 if (vport->fc_flag & FC_LBIT) { 637 /* free any ndlp's in unused state */
480 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
481 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
482 if (ndlp->nlp_type & NLP_FABRIC) {
483 /*
484 * On Linkup its safe to clean up the
485 * ndlp from Fabric connections.
486 */
487 lpfc_nlp_set_state(vport, ndlp,
488 NLP_STE_UNUSED_NODE);
489 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
490 /*
491 * Fail outstanding IO now since
492 * device is marked for PLOGI.
493 */
494 lpfc_unreg_rpi(vport, ndlp);
495 }
496 }
497 }
498 }
499
500 /* free any ndlp's in unused state */
501 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 638 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
502 nlp_listp) { 639 nlp_listp)
503 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 640 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
504 lpfc_drop_node(vport, ndlp); 641 lpfc_drop_node(vport, ndlp);
642}
643
644static int
645lpfc_linkup(struct lpfc_hba *phba)
646{
647 struct lpfc_vport *vport;
648
649 phba->link_state = LPFC_LINK_UP;
650
651 /* Unblock fabric iocbs if they are blocked */
652 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
653 del_timer_sync(&phba->fabric_block_timer);
654
655 list_for_each_entry(vport, &phba->port_list, listentry) {
656 lpfc_linkup_port(vport);
505 } 657 }
658 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
659 lpfc_issue_clear_la(phba, phba->pport);
506 660
507 return 0; 661 return 0;
508} 662}
@@ -529,18 +683,28 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
529 683
530 /* Check for error */ 684 /* Check for error */
531 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { 685 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
532 /* CLEAR_LA mbox error <mbxStatus> state <port_state> */ 686 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
533 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 687 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
534 "%d:0320 CLEAR_LA mbxStatus error x%x hba " 688 "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
535 "state x%x\n", 689 "state x%x\n",
536 phba->brd_no, mb->mbxStatus, vport->port_state); 690 phba->brd_no, vport->vpi, mb->mbxStatus,
691 vport->port_state);
537 692
538 phba->link_state = LPFC_HBA_ERROR; 693 phba->link_state = LPFC_HBA_ERROR;
539 goto out; 694 goto out;
540 } 695 }
541 696
542 if (vport->fc_flag & FC_ABORT_DISCOVERY) 697 if (vport->port_type == LPFC_PHYSICAL_PORT)
543 goto out; 698 phba->link_state = LPFC_HBA_READY;
699
700 spin_lock_irq(&phba->hbalock);
701 psli->sli_flag |= LPFC_PROCESS_LA;
702 control = readl(phba->HCregaddr);
703 control |= HC_LAINT_ENA;
704 writel(control, phba->HCregaddr);
705 readl(phba->HCregaddr); /* flush */
706 spin_unlock_irq(&phba->hbalock);
707 return;
544 708
545 vport->num_disc_nodes = 0; 709 vport->num_disc_nodes = 0;
546 /* go thru NPR nodes and issue ELS PLOGIs */ 710 /* go thru NPR nodes and issue ELS PLOGIs */
@@ -558,8 +722,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
558out: 722out:
559 /* Device Discovery completes */ 723 /* Device Discovery completes */
560 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 724 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
561 "%d:0225 Device Discovery completes\n", 725 "%d (%d):0225 Device Discovery completes\n",
562 phba->brd_no); 726 phba->brd_no, vport->vpi);
563 727
564 mempool_free(pmb, phba->mbox_mem_pool); 728 mempool_free(pmb, phba->mbox_mem_pool);
565 729
@@ -589,8 +753,6 @@ static void
589lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 753lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
590{ 754{
591 struct lpfc_vport *vport = pmb->vport; 755 struct lpfc_vport *vport = pmb->vport;
592 struct lpfc_sli *psli = &phba->sli;
593 int rc;
594 756
595 if (pmb->mb.mbxStatus) 757 if (pmb->mb.mbxStatus)
596 goto out; 758 goto out;
@@ -606,49 +768,40 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
606 */ 768 */
607 lpfc_set_disctmo(vport); 769 lpfc_set_disctmo(vport);
608 return; 770 return;
609 } 771 }
610 772
611 /* Start discovery by sending a FLOGI. port_state is identically 773 /* Start discovery by sending a FLOGI. port_state is identically
612 * LPFC_FLOGI while waiting for FLOGI cmpl 774 * LPFC_FLOGI while waiting for FLOGI cmpl
613 */ 775 */
614 vport->port_state = LPFC_FLOGI; 776 if (vport->port_state != LPFC_FLOGI) {
615 lpfc_set_disctmo(vport); 777 vport->port_state = LPFC_FLOGI;
616 lpfc_initial_flogi(vport); 778 lpfc_set_disctmo(vport);
779 lpfc_initial_flogi(vport);
780 }
617 return; 781 return;
618 782
619out: 783out:
620 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 784 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
621 "%d:0306 CONFIG_LINK mbxStatus error x%x " 785 "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
622 "HBA state x%x\n", 786 "HBA state x%x\n",
623 phba->brd_no, pmb->mb.mbxStatus, vport->port_state); 787 phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
788 vport->port_state);
624 789
625 lpfc_linkdown(phba); 790 mempool_free(pmb, phba->mbox_mem_pool);
626 791
627 phba->link_state = LPFC_HBA_ERROR; 792 lpfc_linkdown(phba);
628 793
629 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 794 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
630 "%d:0200 CONFIG_LINK bad hba state x%x\n", 795 "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
631 phba->brd_no, vport->port_state); 796 phba->brd_no, vport->vpi, vport->port_state);
632 797
633 lpfc_clear_la(phba, pmb); 798 lpfc_issue_clear_la(phba, vport);
634 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
635 pmb->vport = vport;
636 rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
637 if (rc == MBX_NOT_FINISHED) {
638 mempool_free(pmb, phba->mbox_mem_pool);
639 lpfc_disc_flush_list(vport);
640 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
641 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
642 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
643 vport->port_state = LPFC_VPORT_READY;
644 }
645 return; 799 return;
646} 800}
647 801
648static void 802static void
649lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 803lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
650{ 804{
651 struct lpfc_sli *psli = &phba->sli;
652 MAILBOX_t *mb = &pmb->mb; 805 MAILBOX_t *mb = &pmb->mb;
653 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 806 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
654 struct lpfc_vport *vport = pmb->vport; 807 struct lpfc_vport *vport = pmb->vport;
@@ -658,12 +811,12 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
658 if (mb->mbxStatus) { 811 if (mb->mbxStatus) {
659 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 812 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
660 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 813 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
661 "%d:0319 READ_SPARAM mbxStatus error x%x " 814 "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
662 "hba state x%x>\n", 815 "hba state x%x>\n",
663 phba->brd_no, mb->mbxStatus, vport->port_state); 816 phba->brd_no, vport->vpi, mb->mbxStatus,
817 vport->port_state);
664 818
665 lpfc_linkdown(phba); 819 lpfc_linkdown(phba);
666 phba->link_state = LPFC_HBA_ERROR;
667 goto out; 820 goto out;
668 } 821 }
669 822
@@ -675,12 +828,15 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
675 if (phba->cfg_soft_wwpn) 828 if (phba->cfg_soft_wwpn)
676 u64_to_wwn(phba->cfg_soft_wwpn, 829 u64_to_wwn(phba->cfg_soft_wwpn,
677 vport->fc_sparam.portName.u.wwn); 830 vport->fc_sparam.portName.u.wwn);
678 memcpy((uint8_t *) &vport->fc_nodename, 831 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
679 (uint8_t *) &vport->fc_sparam.nodeName, 832 sizeof(vport->fc_nodename));
680 sizeof (struct lpfc_name)); 833 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
681 memcpy((uint8_t *) &vport->fc_portname, 834 sizeof(vport->fc_portname));
682 (uint8_t *) &vport->fc_sparam.portName, 835 if (vport->port_type == LPFC_PHYSICAL_PORT) {
683 sizeof (struct lpfc_name)); 836 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
837 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
838 }
839
684 lpfc_mbuf_free(phba, mp->virt, mp->phys); 840 lpfc_mbuf_free(phba, mp->virt, mp->phys);
685 kfree(mp); 841 kfree(mp);
686 mempool_free(pmb, phba->mbox_mem_pool); 842 mempool_free(pmb, phba->mbox_mem_pool);
@@ -690,35 +846,15 @@ out:
690 pmb->context1 = NULL; 846 pmb->context1 = NULL;
691 lpfc_mbuf_free(phba, mp->virt, mp->phys); 847 lpfc_mbuf_free(phba, mp->virt, mp->phys);
692 kfree(mp); 848 kfree(mp);
693 if (phba->link_state != LPFC_CLEAR_LA) { 849 lpfc_issue_clear_la(phba, vport);
694 struct lpfc_sli_ring *extra_ring = 850 mempool_free(pmb, phba->mbox_mem_pool);
695 &psli->ring[psli->extra_ring];
696 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
697 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
698
699 lpfc_clear_la(phba, pmb);
700 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
701 pmb->vport = vport;
702 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
703 == MBX_NOT_FINISHED) {
704 mempool_free(pmb, phba->mbox_mem_pool);
705 lpfc_disc_flush_list(vport);
706 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
707 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
708 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
709 vport->port_state = LPFC_VPORT_READY;
710 }
711 } else {
712 mempool_free(pmb, phba->mbox_mem_pool);
713 }
714 return; 851 return;
715} 852}
716 853
717static void 854static void
718lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la) 855lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
719{ 856{
720 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 857 struct lpfc_vport *vport = phba->pport;
721 struct lpfc_hba *phba = vport->phba;
722 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 858 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
723 int i; 859 int i;
724 struct lpfc_dmabuf *mp; 860 struct lpfc_dmabuf *mp;
@@ -727,30 +863,32 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
727 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 863 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
728 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 864 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
729 865
730 spin_lock_irq(shost->host_lock); 866 spin_lock_irq(&phba->hbalock);
731 switch (la->UlnkSpeed) { 867 switch (la->UlnkSpeed) {
732 case LA_1GHZ_LINK: 868 case LA_1GHZ_LINK:
733 phba->fc_linkspeed = LA_1GHZ_LINK; 869 phba->fc_linkspeed = LA_1GHZ_LINK;
734 break; 870 break;
735 case LA_2GHZ_LINK: 871 case LA_2GHZ_LINK:
736 phba->fc_linkspeed = LA_2GHZ_LINK; 872 phba->fc_linkspeed = LA_2GHZ_LINK;
737 break; 873 break;
738 case LA_4GHZ_LINK: 874 case LA_4GHZ_LINK:
739 phba->fc_linkspeed = LA_4GHZ_LINK; 875 phba->fc_linkspeed = LA_4GHZ_LINK;
740 break; 876 break;
741 case LA_8GHZ_LINK: 877 case LA_8GHZ_LINK:
742 phba->fc_linkspeed = LA_8GHZ_LINK; 878 phba->fc_linkspeed = LA_8GHZ_LINK;
743 break; 879 break;
744 default: 880 default:
745 phba->fc_linkspeed = LA_UNKNW_LINK; 881 phba->fc_linkspeed = LA_UNKNW_LINK;
746 break; 882 break;
747 } 883 }
748 884
749 phba->fc_topology = la->topology; 885 phba->fc_topology = la->topology;
886 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
750 887
751 if (phba->fc_topology == TOPOLOGY_LOOP) { 888 if (phba->fc_topology == TOPOLOGY_LOOP) {
752 /* Get Loop Map information */ 889 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
753 890
891 /* Get Loop Map information */
754 if (la->il) 892 if (la->il)
755 vport->fc_flag |= FC_LBIT; 893 vport->fc_flag |= FC_LBIT;
756 894
@@ -784,30 +922,35 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
784 } 922 }
785 /* Link Up Event ALPA map */ 923 /* Link Up Event ALPA map */
786 lpfc_printf_log(phba, 924 lpfc_printf_log(phba,
787 KERN_WARNING, 925 KERN_WARNING,
788 LOG_LINK_EVENT, 926 LOG_LINK_EVENT,
789 "%d:1304 Link Up Event " 927 "%d:1304 Link Up Event "
790 "ALPA map Data: x%x " 928 "ALPA map Data: x%x "
791 "x%x x%x x%x\n", 929 "x%x x%x x%x\n",
792 phba->brd_no, 930 phba->brd_no,
793 un.pa.wd1, un.pa.wd2, 931 un.pa.wd1, un.pa.wd2,
794 un.pa.wd3, un.pa.wd4); 932 un.pa.wd3, un.pa.wd4);
795 } 933 }
796 } 934 }
797 } 935 }
798 } else { 936 } else {
937 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
938 if (phba->max_vpi && lpfc_npiv_enable &&
939 (phba->sli_rev == 3))
940 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
941 }
799 vport->fc_myDID = phba->fc_pref_DID; 942 vport->fc_myDID = phba->fc_pref_DID;
800 vport->fc_flag |= FC_LBIT; 943 vport->fc_flag |= FC_LBIT;
801 } 944 }
802 spin_unlock_irq(shost->host_lock); 945 spin_unlock_irq(&phba->hbalock);
803 946
804 lpfc_linkup(phba); 947 lpfc_linkup(phba);
805 if (sparam_mbox) { 948 if (sparam_mbox) {
806 lpfc_read_sparam(phba, sparam_mbox); 949 lpfc_read_sparam(phba, sparam_mbox, 0);
807 sparam_mbox->vport = vport; 950 sparam_mbox->vport = vport;
808 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 951 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
809 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, 952 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
810 (MBX_NOWAIT | MBX_STOP_IOCB)); 953 (MBX_NOWAIT | MBX_STOP_IOCB));
811 if (rc == MBX_NOT_FINISHED) { 954 if (rc == MBX_NOT_FINISHED) {
812 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 955 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
813 lpfc_mbuf_free(phba, mp->virt, mp->phys); 956 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -815,7 +958,7 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
815 mempool_free(sparam_mbox, phba->mbox_mem_pool); 958 mempool_free(sparam_mbox, phba->mbox_mem_pool);
816 if (cfglink_mbox) 959 if (cfglink_mbox)
817 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 960 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
818 return; 961 goto out;
819 } 962 }
820 } 963 }
821 964
@@ -825,10 +968,20 @@ lpfc_mbx_process_link_up(struct lpfc_vport *vport, READ_LA_VAR *la)
825 cfglink_mbox->vport = vport; 968 cfglink_mbox->vport = vport;
826 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 969 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
827 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, 970 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
828 (MBX_NOWAIT | MBX_STOP_IOCB)); 971 (MBX_NOWAIT | MBX_STOP_IOCB));
829 if (rc == MBX_NOT_FINISHED) 972 if (rc != MBX_NOT_FINISHED)
830 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 973 return;
974 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
831 } 975 }
976out:
977 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
978 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
979 "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
980 phba->brd_no, vport->vpi,
981 vport->port_state, sparam_mbox, cfglink_mbox);
982
983 lpfc_issue_clear_la(phba, vport);
984 return;
832} 985}
833 986
834static void 987static void
@@ -886,12 +1039,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
886 spin_unlock_irq(shost->host_lock); 1039 spin_unlock_irq(shost->host_lock);
887 1040
888 if (((phba->fc_eventTag + 1) < la->eventTag) || 1041 if (((phba->fc_eventTag + 1) < la->eventTag) ||
889 (phba->fc_eventTag == la->eventTag)) { 1042 (phba->fc_eventTag == la->eventTag)) {
890 phba->fc_stat.LinkMultiEvent++; 1043 phba->fc_stat.LinkMultiEvent++;
891 if (la->attType == AT_LINK_UP) 1044 if (la->attType == AT_LINK_UP)
892 if (phba->fc_eventTag != 0) 1045 if (phba->fc_eventTag != 0)
893 lpfc_linkdown(phba); 1046 lpfc_linkdown(phba);
894 } 1047 }
895 1048
896 phba->fc_eventTag = la->eventTag; 1049 phba->fc_eventTag = la->eventTag;
897 1050
@@ -912,7 +1065,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
912 la->granted_AL_PA, la->UlnkSpeed, 1065 la->granted_AL_PA, la->UlnkSpeed,
913 phba->alpa_map[0]); 1066 phba->alpa_map[0]);
914 } 1067 }
915 lpfc_mbx_process_link_up(vport, la); 1068 lpfc_mbx_process_link_up(phba, la);
916 } else { 1069 } else {
917 phba->fc_stat.LinkDown++; 1070 phba->fc_stat.LinkDown++;
918 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1071 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -940,7 +1093,7 @@ void
940lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1093lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
941{ 1094{
942 struct lpfc_vport *vport = pmb->vport; 1095 struct lpfc_vport *vport = pmb->vport;
943 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1096 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
944 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 1097 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
945 1098
946 pmb->context1 = NULL; 1099 pmb->context1 = NULL;
@@ -955,6 +1108,100 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
955 return; 1108 return;
956} 1109}
957 1110
1111static void
1112lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1113{
1114 MAILBOX_t *mb = &pmb->mb;
1115 struct lpfc_vport *vport = pmb->vport;
1116 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1117
1118 switch (mb->mbxStatus) {
1119 case 0x0011:
1120 case 0x0020:
1121 case 0x9700:
1122 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1123 "%d (%d):0911 cmpl_unreg_vpi, "
1124 "mb status = 0x%x\n",
1125 phba->brd_no, vport->vpi, mb->mbxStatus);
1126 break;
1127 default:
1128 phba->vpi_cnt--;
1129 }
1130 vport->unreg_vpi_cmpl = VPORT_OK;
1131 mempool_free(pmb, phba->mbox_mem_pool);
1132 /*
1133 * This shost reference might have been taken at the beginning of
1134 * lpfc_vport_delete()
1135 */
1136 if (vport->load_flag & FC_UNLOADING)
1137 scsi_host_put(shost);
1138}
1139
1140void
1141lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1142{
1143 struct lpfc_hba *phba = vport->phba;
1144 LPFC_MBOXQ_t *mbox;
1145 int rc;
1146
1147 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1148 if (!mbox)
1149 return;
1150
1151 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1152 mbox->vport = vport;
1153 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1154 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1155 if (rc == MBX_NOT_FINISHED) {
1156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1157 "%d (%d):1800 Could not issue unreg_vpi\n",
1158 phba->brd_no, vport->vpi);
1159 mempool_free(mbox, phba->mbox_mem_pool);
1160 vport->unreg_vpi_cmpl = VPORT_ERROR;
1161 }
1162}
1163
1164static void
1165lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1166{
1167 struct lpfc_vport *vport = pmb->vport;
1168 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1169 MAILBOX_t *mb = &pmb->mb;
1170
1171 switch (mb->mbxStatus) {
1172 case 0x0011:
1173 case 0x9601:
1174 case 0x9602:
1175 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1176 "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
1177 phba->brd_no, vport->vpi, mb->mbxStatus);
1178 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1179 spin_lock_irq(shost->host_lock);
1180 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1181 spin_unlock_irq(shost->host_lock);
1182 vport->fc_myDID = 0;
1183 goto out;
1184 }
1185 phba->vpi_cnt++;
1186
1187 vport->num_disc_nodes = 0;
1188 /* go thru NPR list and issue ELS PLOGIs */
1189 if (vport->fc_npr_cnt)
1190 lpfc_els_disc_plogi(vport);
1191
1192 if (!vport->num_disc_nodes) {
1193 spin_lock_irq(shost->host_lock);
1194 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1195 spin_unlock_irq(shost->host_lock);
1196 lpfc_can_disctmo(vport);
1197 }
1198 vport->port_state = LPFC_VPORT_READY;
1199
1200out:
1201 mempool_free(pmb, phba->mbox_mem_pool);
1202 return;
1203}
1204
958/* 1205/*
959 * This routine handles processing a Fabric REG_LOGIN mailbox 1206 * This routine handles processing a Fabric REG_LOGIN mailbox
960 * command upon completion. It is setup in the LPFC_MBOXQ 1207 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -964,10 +1211,11 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
964void 1211void
965lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1212lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
966{ 1213{
967 struct lpfc_vport *vport = pmb->vport; 1214 struct lpfc_vport *vport = pmb->vport;
1215 struct lpfc_vport *next_vport;
968 MAILBOX_t *mb = &pmb->mb; 1216 MAILBOX_t *mb = &pmb->mb;
969 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1217 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
970 struct lpfc_nodelist *ndlp, *ndlp_fdmi; 1218 struct lpfc_nodelist *ndlp;
971 ndlp = (struct lpfc_nodelist *) pmb->context2; 1219 ndlp = (struct lpfc_nodelist *) pmb->context2;
972 1220
973 pmb->context1 = NULL; 1221 pmb->context1 = NULL;
@@ -979,11 +1227,20 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
979 mempool_free(pmb, phba->mbox_mem_pool); 1227 mempool_free(pmb, phba->mbox_mem_pool);
980 lpfc_nlp_put(ndlp); 1228 lpfc_nlp_put(ndlp);
981 1229
982 /* FLOGI failed, so just use loop map to make discovery list */ 1230 if (phba->fc_topology == TOPOLOGY_LOOP) {
983 lpfc_disc_list_loopmap(vport); 1231 /* FLOGI failed, use loop map to make discovery list */
1232 lpfc_disc_list_loopmap(vport);
1233
1234 /* Start discovery */
1235 lpfc_disc_start(vport);
1236 return;
1237 }
1238
1239 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1240 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1241 "%d (%d):0258 Register Fabric login error: 0x%x\n",
1242 phba->brd_no, vport->vpi, mb->mbxStatus);
984 1243
985 /* Start discovery */
986 lpfc_disc_start(vport);
987 return; 1244 return;
988 } 1245 }
989 1246
@@ -994,47 +1251,25 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
994 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ 1251 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
995 1252
996 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1253 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
997 /* This NPort has been assigned an NPort_ID by the fabric as a 1254 list_for_each_entry(next_vport, &phba->port_list, listentry) {
998 * result of the completed fabric login. Issue a State Change 1255 if (next_vport->port_type == LPFC_PHYSICAL_PORT)
999 * Registration (SCR) ELS request to the fabric controller 1256 continue;
1000 * (SCR_DID) so that this NPort gets RSCN events from the
1001 * fabric.
1002 */
1003 lpfc_issue_els_scr(vport, SCR_DID, 0);
1004
1005 ndlp = lpfc_findnode_did(vport, NameServer_DID);
1006 if (!ndlp) {
1007 /* Allocate a new node instance. If the pool is empty,
1008 * start the discovery process and skip the Nameserver
1009 * login process. This is attempted again later on.
1010 * Otherwise, issue a Port Login (PLOGI) to
1011 * the NameServer
1012 */
1013 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1014 if (!ndlp) {
1015 lpfc_disc_start(vport);
1016 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1017 kfree(mp);
1018 mempool_free(pmb, phba->mbox_mem_pool);
1019 return;
1020 } else {
1021 lpfc_nlp_init(vport, ndlp, NameServer_DID);
1022 ndlp->nlp_type |= NLP_FABRIC;
1023 }
1024 }
1025 1257
1026 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 1258 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1027 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 1259 lpfc_initial_fdisc(next_vport);
1028 if (phba->cfg_fdmi_on) { 1260 else {
1029 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, 1261 if (phba->sli3_options &
1030 GFP_KERNEL); 1262 LPFC_SLI3_NPIV_ENABLED) {
1031 if (ndlp_fdmi) { 1263 lpfc_vport_set_state(vport,
1032 lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); 1264 FC_VPORT_NO_FABRIC_SUPP);
1033 ndlp_fdmi->nlp_type |= NLP_FABRIC; 1265 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1034 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE; 1266 "%d (%d):0259 No NPIV Fabric "
1035 lpfc_issue_els_plogi(vport, FDMI_DID, 0); 1267 "support\n",
1268 phba->brd_no, vport->vpi);
1269 }
1036 } 1270 }
1037 } 1271 }
1272 lpfc_do_scr_ns_plogi(phba, vport);
1038 } 1273 }
1039 1274
1040 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1275 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1058,20 +1293,28 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1058 struct lpfc_vport *vport = pmb->vport; 1293 struct lpfc_vport *vport = pmb->vport;
1059 1294
1060 if (mb->mbxStatus) { 1295 if (mb->mbxStatus) {
1296out:
1061 lpfc_nlp_put(ndlp); 1297 lpfc_nlp_put(ndlp);
1062 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1298 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1063 kfree(mp); 1299 kfree(mp);
1064 mempool_free(pmb, phba->mbox_mem_pool); 1300 mempool_free(pmb, phba->mbox_mem_pool);
1065 lpfc_drop_node(vport, ndlp); 1301 lpfc_drop_node(vport, ndlp);
1066 1302
1067 /* 1303 if (phba->fc_topology == TOPOLOGY_LOOP) {
1068 * RegLogin failed, so just use loop map to make discovery 1304 /*
1069 * list 1305 * RegLogin failed, use loop map to make discovery
1070 */ 1306 * list
1071 lpfc_disc_list_loopmap(vport); 1307 */
1308 lpfc_disc_list_loopmap(vport);
1072 1309
1073 /* Start discovery */ 1310 /* Start discovery */
1074 lpfc_disc_start(vport); 1311 lpfc_disc_start(vport);
1312 return;
1313 }
1314 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1315 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1316 "%d (%d):0260 Register NameServer error: 0x%x\n",
1317 phba->brd_no, vport->vpi, mb->mbxStatus);
1075 return; 1318 return;
1076 } 1319 }
1077 1320
@@ -1083,17 +1326,21 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1083 1326
1084 if (vport->port_state < LPFC_VPORT_READY) { 1327 if (vport->port_state < LPFC_VPORT_READY) {
1085 /* Link up discovery requires Fabric registration. */ 1328 /* Link up discovery requires Fabric registration. */
1086 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RNN_ID); 1329 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1087 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RSNN_NN); 1330 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1088 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFT_ID); 1331 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1089 lpfc_ns_cmd(vport, ndlp, SLI_CTNS_RFF_ID); 1332 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1333 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1334
1335 /* Issue SCR just before NameServer GID_FT Query */
1336 lpfc_issue_els_scr(vport, SCR_DID, 0);
1090 } 1337 }
1091 1338
1092 vport->fc_ns_retry = 0; 1339 vport->fc_ns_retry = 0;
1093 /* Good status, issue CT Request to NameServer */ 1340 /* Good status, issue CT Request to NameServer */
1094 if (lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT)) { 1341 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1095 /* Cannot issue NameServer Query, so finish up discovery */ 1342 /* Cannot issue NameServer Query, so finish up discovery */
1096 lpfc_disc_start(vport); 1343 goto out;
1097 } 1344 }
1098 1345
1099 lpfc_nlp_put(ndlp); 1346 lpfc_nlp_put(ndlp);
@@ -1127,7 +1374,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1127 * registered the port. 1374 * registered the port.
1128 */ 1375 */
1129 if (ndlp->rport && ndlp->rport->dd_data && 1376 if (ndlp->rport && ndlp->rport->dd_data &&
1130 *(struct lpfc_rport_data **) ndlp->rport->dd_data) { 1377 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1131 lpfc_nlp_put(ndlp); 1378 lpfc_nlp_put(ndlp);
1132 } 1379 }
1133 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 1380 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
@@ -1147,16 +1394,16 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1147 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 1394 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1148 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 1395 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1149 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 1396 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1397 del_timer_sync(&ndlp->nlp_initiator_tmr);
1150 1398
1151 1399
1152 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 1400 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1153 fc_remote_port_rolechg(rport, rport_ids.roles); 1401 fc_remote_port_rolechg(rport, rport_ids.roles);
1154 1402
1155 if ((rport->scsi_target_id != -1) && 1403 if ((rport->scsi_target_id != -1) &&
1156 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 1404 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1157 ndlp->nlp_sid = rport->scsi_target_id; 1405 ndlp->nlp_sid = rport->scsi_target_id;
1158 } 1406 }
1159
1160 return; 1407 return;
1161} 1408}
1162 1409
@@ -1164,14 +1411,6 @@ static void
1164lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) 1411lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1165{ 1412{
1166 struct fc_rport *rport = ndlp->rport; 1413 struct fc_rport *rport = ndlp->rport;
1167 struct lpfc_rport_data *rdata = rport->dd_data;
1168
1169 if (rport->scsi_target_id == -1) {
1170 ndlp->rport = NULL;
1171 rdata->pnode = NULL;
1172 lpfc_nlp_put(ndlp);
1173 put_device(&rport->dev);
1174 }
1175 1414
1176 fc_remote_port_delete(rport); 1415 fc_remote_port_delete(rport);
1177 1416
@@ -1377,9 +1616,9 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
1377 1616
1378 /* Start Discovery Timer state <hba_state> */ 1617 /* Start Discovery Timer state <hba_state> */
1379 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1618 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1380 "%d:0247 Start Discovery Timer state x%x " 1619 "%d (%d):0247 Start Discovery Timer state x%x "
1381 "Data: x%x x%lx x%x x%x\n", 1620 "Data: x%x x%lx x%x x%x\n",
1382 phba->brd_no, vport->port_state, tmo, 1621 phba->brd_no, vport->vpi, vport->port_state, tmo,
1383 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, 1622 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1384 vport->fc_adisc_cnt); 1623 vport->fc_adisc_cnt);
1385 1624
@@ -1409,10 +1648,11 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
1409 1648
1410 /* Cancel Discovery Timer state <hba_state> */ 1649 /* Cancel Discovery Timer state <hba_state> */
1411 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1650 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1412 "%d:0248 Cancel Discovery Timer state x%x " 1651 "%d (%d):0248 Cancel Discovery Timer state x%x "
1413 "Data: x%x x%x x%x\n", 1652 "Data: x%x x%x x%x\n",
1414 phba->brd_no, vport->port_state, vport->fc_flag, 1653 phba->brd_no, vport->vpi, vport->port_state,
1415 vport->fc_plogi_cnt, vport->fc_adisc_cnt); 1654 vport->fc_flag, vport->fc_plogi_cnt,
1655 vport->fc_adisc_cnt);
1416 1656
1417 return 0; 1657 return 0;
1418} 1658}
@@ -1429,6 +1669,11 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1429{ 1669{
1430 struct lpfc_sli *psli = &phba->sli; 1670 struct lpfc_sli *psli = &phba->sli;
1431 IOCB_t *icmd = &iocb->iocb; 1671 IOCB_t *icmd = &iocb->iocb;
1672 struct lpfc_vport *vport = ndlp->vport;
1673
1674 if (iocb->vport != vport)
1675 return 0;
1676
1432 if (pring->ringno == LPFC_ELS_RING) { 1677 if (pring->ringno == LPFC_ELS_RING) {
1433 switch (icmd->ulpCommand) { 1678 switch (icmd->ulpCommand) {
1434 case CMD_GEN_REQUEST64_CR: 1679 case CMD_GEN_REQUEST64_CR:
@@ -1446,7 +1691,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1446 } else if (pring->ringno == psli->fcp_ring) { 1691 } else if (pring->ringno == psli->fcp_ring) {
1447 /* Skip match check if waiting to relogin to FCP target */ 1692 /* Skip match check if waiting to relogin to FCP target */
1448 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 1693 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1449 (ndlp->nlp_flag & NLP_DELAY_TMO)) { 1694 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1450 return 0; 1695 return 0;
1451 } 1696 }
1452 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { 1697 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
@@ -1472,6 +1717,8 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1472 IOCB_t *icmd; 1717 IOCB_t *icmd;
1473 uint32_t rpi, i; 1718 uint32_t rpi, i;
1474 1719
1720 lpfc_fabric_abort_nport(ndlp);
1721
1475 /* 1722 /*
1476 * Everything that matches on txcmplq will be returned 1723 * Everything that matches on txcmplq will be returned
1477 * by firmware with a no rpi error. 1724 * by firmware with a no rpi error.
@@ -1490,8 +1737,8 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1490 * Check to see if iocb matches the nport we are 1737 * Check to see if iocb matches the nport we are
1491 * looking for 1738 * looking for
1492 */ 1739 */
1493 if ((lpfc_check_sli_ndlp 1740 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1494 (phba, pring, iocb, ndlp))) { 1741 ndlp))) {
1495 /* It matches, so deque and call compl 1742 /* It matches, so deque and call compl
1496 with an error */ 1743 with an error */
1497 list_move_tail(&iocb->list, 1744 list_move_tail(&iocb->list,
@@ -1505,7 +1752,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1505 1752
1506 while (!list_empty(&completions)) { 1753 while (!list_empty(&completions)) {
1507 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1754 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1508 list_del(&iocb->list); 1755 list_del_init(&iocb->list);
1509 1756
1510 if (!iocb->iocb_cmpl) 1757 if (!iocb->iocb_cmpl)
1511 lpfc_sli_release_iocbq(phba, iocb); 1758 lpfc_sli_release_iocbq(phba, iocb);
@@ -1539,11 +1786,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1539 if (ndlp->nlp_rpi) { 1786 if (ndlp->nlp_rpi) {
1540 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1787 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1541 if (mbox) { 1788 if (mbox) {
1542 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox); 1789 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1543 mbox->vport = vport; 1790 mbox->vport = vport;
1544 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 1791 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1545 rc = lpfc_sli_issue_mbox 1792 rc = lpfc_sli_issue_mbox(phba, mbox,
1546 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 1793 (MBX_NOWAIT | MBX_STOP_IOCB));
1547 if (rc == MBX_NOT_FINISHED) 1794 if (rc == MBX_NOT_FINISHED)
1548 mempool_free(mbox, phba->mbox_mem_pool); 1795 mempool_free(mbox, phba->mbox_mem_pool);
1549 } 1796 }
@@ -1554,6 +1801,50 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1554 return 0; 1801 return 0;
1555} 1802}
1556 1803
1804void
1805lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1806{
1807 struct lpfc_hba *phba = vport->phba;
1808 LPFC_MBOXQ_t *mbox;
1809 int rc;
1810
1811 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1812 if (mbox) {
1813 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1814 mbox->vport = vport;
1815 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1816 rc = lpfc_sli_issue_mbox(phba, mbox,
1817 (MBX_NOWAIT | MBX_STOP_IOCB));
1818 if (rc == MBX_NOT_FINISHED) {
1819 mempool_free(mbox, phba->mbox_mem_pool);
1820 }
1821 }
1822}
1823
1824void
1825lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1826{
1827 struct lpfc_hba *phba = vport->phba;
1828 LPFC_MBOXQ_t *mbox;
1829 int rc;
1830
1831 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1832 if (mbox) {
1833 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1834 mbox->vport = vport;
1835 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1836 rc = lpfc_sli_issue_mbox(phba, mbox,
1837 (MBX_NOWAIT | MBX_STOP_IOCB));
1838 if (rc == MBX_NOT_FINISHED) {
1839 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1840 "%d (%d):1815 Could not issue "
1841 "unreg_did (default rpis)\n",
1842 phba->brd_no, vport->vpi);
1843 mempool_free(mbox, phba->mbox_mem_pool);
1844 }
1845 }
1846}
1847
1557/* 1848/*
1558 * Free resources associated with LPFC_NODELIST entry 1849 * Free resources associated with LPFC_NODELIST entry
1559 * so it can be freed. 1850 * so it can be freed.
@@ -1568,9 +1859,9 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1568 1859
1569 /* Cleanup node for NPort <nlp_DID> */ 1860 /* Cleanup node for NPort <nlp_DID> */
1570 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1861 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1571 "%d:0900 Cleanup node for NPort x%x " 1862 "%d (%d):0900 Cleanup node for NPort x%x "
1572 "Data: x%x x%x x%x\n", 1863 "Data: x%x x%x x%x\n",
1573 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 1864 phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
1574 ndlp->nlp_state, ndlp->nlp_rpi); 1865 ndlp->nlp_state, ndlp->nlp_rpi);
1575 1866
1576 lpfc_dequeue_node(vport, ndlp); 1867 lpfc_dequeue_node(vport, ndlp);
@@ -1587,7 +1878,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1587 spin_lock_irq(&phba->hbalock); 1878 spin_lock_irq(&phba->hbalock);
1588 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1879 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1589 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1880 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1590 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1881 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1591 mp = (struct lpfc_dmabuf *) (mb->context1); 1882 mp = (struct lpfc_dmabuf *) (mb->context1);
1592 if (mp) { 1883 if (mp) {
1593 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 1884 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1607,9 +1898,12 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1607 1898
1608 ndlp->nlp_last_elscmd = 0; 1899 ndlp->nlp_last_elscmd = 0;
1609 del_timer_sync(&ndlp->nlp_delayfunc); 1900 del_timer_sync(&ndlp->nlp_delayfunc);
1901 del_timer_sync(&ndlp->nlp_initiator_tmr);
1610 1902
1611 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 1903 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1612 list_del_init(&ndlp->els_retry_evt.evt_listp); 1904 list_del_init(&ndlp->els_retry_evt.evt_listp);
1905 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
1906 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1613 1907
1614 lpfc_unreg_rpi(vport, ndlp); 1908 lpfc_unreg_rpi(vport, ndlp);
1615 1909
@@ -1633,12 +1927,11 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1633 lpfc_cleanup_node(vport, ndlp); 1927 lpfc_cleanup_node(vport, ndlp);
1634 1928
1635 /* 1929 /*
1636 * We should never get here with a non-NULL ndlp->rport. But 1930 * We can get here with a non-NULL ndlp->rport because when we
1637 * if we do, drop the reference to the rport. That seems the 1931 * unregister a rport we don't break the rport/node linkage. So if we
1638 * intelligent thing to do. 1932 * do, make sure we don't leave any dangling pointers behind.
1639 */ 1933 */
1640 if (ndlp->rport && !(vport->load_flag & FC_UNLOADING)) { 1934 if (ndlp->rport) {
1641 put_device(&ndlp->rport->dev);
1642 rdata = ndlp->rport->dd_data; 1935 rdata = ndlp->rport->dd_data;
1643 rdata->pnode = NULL; 1936 rdata->pnode = NULL;
1644 ndlp->rport = NULL; 1937 ndlp->rport = NULL;
@@ -1709,9 +2002,9 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1709 ((uint32_t) ndlp->nlp_type << 8) | 2002 ((uint32_t) ndlp->nlp_type << 8) |
1710 ((uint32_t) ndlp->nlp_rpi & 0xff)); 2003 ((uint32_t) ndlp->nlp_rpi & 0xff));
1711 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2004 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1712 "%d:0929 FIND node DID " 2005 "%d (%d):0929 FIND node DID "
1713 " Data: x%p x%x x%x x%x\n", 2006 " Data: x%p x%x x%x x%x\n",
1714 phba->brd_no, 2007 phba->brd_no, vport->vpi,
1715 ndlp, ndlp->nlp_DID, 2008 ndlp, ndlp->nlp_DID,
1716 ndlp->nlp_flag, data1); 2009 ndlp->nlp_flag, data1);
1717 return ndlp; 2010 return ndlp;
@@ -1720,8 +2013,8 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1720 2013
1721 /* FIND node did <did> NOT FOUND */ 2014 /* FIND node did <did> NOT FOUND */
1722 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2015 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1723 "%d:0932 FIND node did x%x NOT FOUND.\n", 2016 "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
1724 phba->brd_no, did); 2017 phba->brd_no, vport->vpi, did);
1725 return NULL; 2018 return NULL;
1726} 2019}
1727 2020
@@ -1835,6 +2128,14 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
1835 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring]; 2128 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
1836 int rc; 2129 int rc;
1837 2130
2131 /*
2132 * if it's not a physical port or if we already sent
2133 * clear_la then don't send it.
2134 */
2135 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2136 (vport->port_type != LPFC_PHYSICAL_PORT))
2137 return;
2138
1838 /* Link up discovery */ 2139 /* Link up discovery */
1839 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { 2140 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
1840 phba->link_state = LPFC_CLEAR_LA; 2141 phba->link_state = LPFC_CLEAR_LA;
@@ -1849,7 +2150,26 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
1849 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 2150 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
1850 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 2151 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
1851 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 2152 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
1852 vport->port_state = LPFC_VPORT_READY; 2153 phba->link_state = LPFC_HBA_ERROR;
2154 }
2155 }
2156}
2157
2158/* Reg_vpi to tell firmware to resume normal operations */
2159void
2160lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2161{
2162 LPFC_MBOXQ_t *regvpimbox;
2163
2164 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2165 if (regvpimbox) {
2166 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2167 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2168 regvpimbox->vport = vport;
2169 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2170 (MBX_NOWAIT | MBX_STOP_IOCB))
2171 == MBX_NOT_FINISHED) {
2172 mempool_free(regvpimbox, phba->mbox_mem_pool);
1853 } 2173 }
1854 } 2174 }
1855} 2175}
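
The new lpfc_issue_reg_vpi() above follows the driver's usual fire-and-forget mailbox pattern: allocate from the mailbox mempool, let lpfc_reg_vpi() build the command, attach a completion handler and vport back-pointer, then issue non-blocking. A minimal sketch of the ownership rule, assuming the lpfc headers and the convention that the completion handler (not shown in this excerpt) releases the mailbox on success:

/*
 * Sketch only: who frees the mailbox.  Once lpfc_sli_issue_mbox()
 * accepts the command, the completion callback is assumed to own it;
 * only an MBX_NOT_FINISHED return leaves the caller responsible.
 */
static void example_issue_reg_vpi(struct lpfc_hba *phba,
				  struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!mbox)
		return;				/* nothing allocated yet */

	lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;	/* assumed to free mbox */
	mbox->vport = vport;

	if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
	    == MBX_NOT_FINISHED)
		mempool_free(mbox, phba->mbox_mem_pool);	/* issue refused */
}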
@@ -1860,7 +2180,6 @@ lpfc_disc_start(struct lpfc_vport *vport)
1860{ 2180{
1861 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2181 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1862 struct lpfc_hba *phba = vport->phba; 2182 struct lpfc_hba *phba = vport->phba;
1863 struct lpfc_nodelist *ndlp, *next_ndlp;
1864 uint32_t num_sent; 2183 uint32_t num_sent;
1865 uint32_t clear_la_pending; 2184 uint32_t clear_la_pending;
1866 int did_changed; 2185 int did_changed;
@@ -1888,21 +2207,11 @@ lpfc_disc_start(struct lpfc_vport *vport)
1888 2207
1889 /* Start Discovery state <hba_state> */ 2208 /* Start Discovery state <hba_state> */
1890 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2209 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1891 "%d:0202 Start Discovery hba state x%x " 2210 "%d (%d):0202 Start Discovery hba state x%x "
1892 "Data: x%x x%x x%x\n", 2211 "Data: x%x x%x x%x\n",
1893 phba->brd_no, vport->port_state, vport->fc_flag, 2212 phba->brd_no, vport->vpi, vport->port_state,
1894 vport->fc_plogi_cnt, vport->fc_adisc_cnt); 2213 vport->fc_flag, vport->fc_plogi_cnt,
1895 2214 vport->fc_adisc_cnt);
1896 /* If our did changed, we MUST do PLOGI */
1897 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1898 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
1899 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
1900 did_changed) {
1901 spin_lock_irq(shost->host_lock);
1902 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1903 spin_unlock_irq(shost->host_lock);
1904 }
1905 }
1906 2215
1907 /* First do ADISCs - if any */ 2216 /* First do ADISCs - if any */
1908 num_sent = lpfc_els_disc_adisc(vport); 2217 num_sent = lpfc_els_disc_adisc(vport);
@@ -1910,12 +2219,26 @@ lpfc_disc_start(struct lpfc_vport *vport)
1910 if (num_sent) 2219 if (num_sent)
1911 return; 2220 return;
1912 2221
2222 /*
2223 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2224 * continue discovery.
2225 */
2226 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2227 !(vport->fc_flag & FC_RSCN_MODE)) {
2228 lpfc_issue_reg_vpi(phba, vport);
2229 return;
2230 }
2231
2232 /*
2233 * For SLI2, we need to set port_state to READY and continue
2234 * discovery.
2235 */
1913 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { 2236 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
1914 if (vport->port_type == LPFC_PHYSICAL_PORT) {
1915 /* If we get here, there is nothing to ADISC */ 2237 /* If we get here, there is nothing to ADISC */
2238 if (vport->port_type == LPFC_PHYSICAL_PORT)
1916 lpfc_issue_clear_la(phba, vport); 2239 lpfc_issue_clear_la(phba, vport);
1917 } else if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1918 2240
2241 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1919 vport->num_disc_nodes = 0; 2242 vport->num_disc_nodes = 0;
1920 /* go thru NPR nodes and issue ELS PLOGIs */ 2243 /* go thru NPR nodes and issue ELS PLOGIs */
1921 if (vport->fc_npr_cnt) 2244 if (vport->fc_npr_cnt)
@@ -1925,9 +2248,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
1925 spin_lock_irq(shost->host_lock); 2248 spin_lock_irq(shost->host_lock);
1926 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2249 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1927 spin_unlock_irq(shost->host_lock); 2250 spin_unlock_irq(shost->host_lock);
2251 lpfc_can_disctmo(vport);
1928 } 2252 }
1929 vport->port_state = LPFC_VPORT_READY;
1930 } 2253 }
2254 vport->port_state = LPFC_VPORT_READY;
1931 } else { 2255 } else {
1932 /* Next do PLOGIs - if any */ 2256 /* Next do PLOGIs - if any */
1933 num_sent = lpfc_els_disc_plogi(vport); 2257 num_sent = lpfc_els_disc_plogi(vport);
@@ -1944,6 +2268,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
1944 spin_lock_irq(shost->host_lock); 2268 spin_lock_irq(shost->host_lock);
1945 vport->fc_flag &= ~FC_RSCN_MODE; 2269 vport->fc_flag &= ~FC_RSCN_MODE;
1946 spin_unlock_irq(shost->host_lock); 2270 spin_unlock_irq(shost->host_lock);
2271 lpfc_can_disctmo(vport);
1947 } else 2272 } else
1948 lpfc_els_handle_rscn(vport); 2273 lpfc_els_handle_rscn(vport);
1949 } 2274 }
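
Taken together, the lpfc_disc_start() hunks above change how discovery is declared complete: with SLI-3 NPIV enabled (and no RSCN pending) the port issues REG_VPI and lets the cmpl_reg_vpi completion advance port_state, while the SLI-2 path keeps CLEAR_LA on the physical port and marks LPFC_VPORT_READY inline. A condensed sketch of just that decision; the real function also drives the ADISC/PLOGI counters and RSCN handling:

/*
 * Condensed view of the completion decision added to lpfc_disc_start().
 * Sketch only, assuming the lpfc driver headers.
 */
static void example_finish_discovery(struct lpfc_hba *phba,
				     struct lpfc_vport *vport)
{
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_RSCN_MODE)) {
		/* SLI-3: REG_VPI; its completion sets port_state. */
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/* SLI-2: CLEAR_LA on the physical port, then mark READY here. */
	if (vport->port_type == LPFC_PHYSICAL_PORT)
		lpfc_issue_clear_la(phba, vport);
	vport->port_state = LPFC_VPORT_READY;
}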
@@ -1999,7 +2324,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1999 2324
2000 while (!list_empty(&completions)) { 2325 while (!list_empty(&completions)) {
2001 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 2326 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2002 list_del(&iocb->list); 2327 list_del_init(&iocb->list);
2003 2328
2004 if (!iocb->iocb_cmpl) 2329 if (!iocb->iocb_cmpl)
2005 lpfc_sli_release_iocbq(phba, iocb); 2330 lpfc_sli_release_iocbq(phba, iocb);
@@ -2030,6 +2355,14 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
2030 } 2355 }
2031} 2356}
2032 2357
2358void
2359lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2360{
2361 lpfc_els_flush_rscn(vport);
2362 lpfc_els_flush_cmd(vport);
2363 lpfc_disc_flush_list(vport);
2364}
2365
2033/*****************************************************************************/ 2366/*****************************************************************************/
2034/* 2367/*
2035 * NAME: lpfc_disc_timeout 2368 * NAME: lpfc_disc_timeout
@@ -2060,8 +2393,10 @@ lpfc_disc_timeout(unsigned long ptr)
2060 vport->work_port_events |= WORKER_DISC_TMO; 2393 vport->work_port_events |= WORKER_DISC_TMO;
2061 spin_unlock_irqrestore(&vport->work_port_lock, flags); 2394 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2062 2395
2396 spin_lock_irqsave(&phba->hbalock, flags);
2063 if (phba->work_wait) 2397 if (phba->work_wait)
2064 wake_up(phba->work_wait); 2398 lpfc_worker_wake_up(phba);
2399 spin_unlock_irqrestore(&phba->hbalock, flags);
2065 } 2400 }
2066 return; 2401 return;
2067} 2402}
@@ -2073,7 +2408,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2073 struct lpfc_hba *phba = vport->phba; 2408 struct lpfc_hba *phba = vport->phba;
2074 struct lpfc_sli *psli = &phba->sli; 2409 struct lpfc_sli *psli = &phba->sli;
2075 struct lpfc_nodelist *ndlp, *next_ndlp; 2410 struct lpfc_nodelist *ndlp, *next_ndlp;
2076 LPFC_MBOXQ_t *clearlambox, *initlinkmbox; 2411 LPFC_MBOXQ_t *initlinkmbox;
2077 int rc, clrlaerr = 0; 2412 int rc, clrlaerr = 0;
2078 2413
2079 if (!(vport->fc_flag & FC_DISC_TMO)) 2414 if (!(vport->fc_flag & FC_DISC_TMO))
@@ -2091,8 +2426,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2091 */ 2426 */
2092 /* FAN timeout */ 2427 /* FAN timeout */
2093 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, 2428 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
2094 "%d:0221 FAN timeout\n", 2429 "%d (%d):0221 FAN timeout\n",
2095 phba->brd_no); 2430 phba->brd_no, vport->vpi);
2096 2431
2097 /* Start discovery by sending FLOGI, clean up old rpis */ 2432 /* Start discovery by sending FLOGI, clean up old rpis */
2098 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2433 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
@@ -2109,17 +2444,21 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2109 lpfc_unreg_rpi(vport, ndlp); 2444 lpfc_unreg_rpi(vport, ndlp);
2110 } 2445 }
2111 } 2446 }
2112 vport->port_state = LPFC_FLOGI; 2447 if (vport->port_state != LPFC_FLOGI) {
2113 lpfc_set_disctmo(vport); 2448 vport->port_state = LPFC_FLOGI;
2114 lpfc_initial_flogi(vport); 2449 lpfc_set_disctmo(vport);
2450 lpfc_initial_flogi(vport);
2451 }
2115 break; 2452 break;
2116 2453
2454 case LPFC_FDISC:
2117 case LPFC_FLOGI: 2455 case LPFC_FLOGI:
2118 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 2456 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2119 /* Initial FLOGI timeout */ 2457 /* Initial FLOGI timeout */
2120 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2458 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2121 "%d:0222 Initial FLOGI timeout\n", 2459 "%d (%d):0222 Initial %s timeout\n",
2122 phba->brd_no); 2460 phba->brd_no, vport->vpi,
2461 vport->vpi ? "FLOGI" : "FDISC");
2123 2462
2124 /* Assume no Fabric and go on with discovery. 2463 /* Assume no Fabric and go on with discovery.
2125 * Check for outstanding ELS FLOGI to abort. 2464 * Check for outstanding ELS FLOGI to abort.
@@ -2136,8 +2475,9 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2136 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 2475 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2137 NameServer login */ 2476 NameServer login */
2138 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2477 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2139 "%d:0223 Timeout while waiting for NameServer " 2478 "%d (%d):0223 Timeout while waiting for "
2140 "login\n", phba->brd_no); 2479 "NameServer login\n",
2480 phba->brd_no, vport->vpi);
2141 2481
2142 /* Next look for NameServer ndlp */ 2482 /* Next look for NameServer ndlp */
2143 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2483 ndlp = lpfc_findnode_did(vport, NameServer_DID);
@@ -2150,53 +2490,40 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2150 case LPFC_NS_QRY: 2490 case LPFC_NS_QRY:
2151 /* Check for wait for NameServer Rsp timeout */ 2491 /* Check for wait for NameServer Rsp timeout */
2152 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2492 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2153 "%d:0224 NameServer Query timeout " 2493 "%d (%d):0224 NameServer Query timeout "
2154 "Data: x%x x%x\n", 2494 "Data: x%x x%x\n",
2155 phba->brd_no, 2495 phba->brd_no, vport->vpi,
2156 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 2496 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2157 2497
2158 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2498 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2159 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 2499 /* Try it one more time */
2160 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 2500 vport->fc_ns_retry++;
2161 /* Try it one more time */ 2501 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2162 rc = lpfc_ns_cmd(vport, ndlp, SLI_CTNS_GID_FT); 2502 vport->fc_ns_retry, 0);
2163 if (rc == 0) 2503 if (rc == 0)
2164 break; 2504 break;
2165 }
2166 vport->fc_ns_retry = 0;
2167 }
2168
2169 /* Nothing to authenticate, so CLEAR_LA right now */
2170 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2171 if (!clearlambox) {
2172 clrlaerr = 1;
2173 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2174 "%d:0226 Device Discovery "
2175 "completion error\n",
2176 phba->brd_no);
2177 phba->link_state = LPFC_HBA_ERROR;
2178 break;
2179 } 2505 }
2506 vport->fc_ns_retry = 0;
2180 2507
2181 phba->link_state = LPFC_CLEAR_LA; 2508 /*
2182 lpfc_clear_la(phba, clearlambox); 2509 * Discovery is over.
2183 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 2510 * set port_state to PORT_READY if SLI2.
2184 clearlambox->vport = vport; 2511 * cmpl_reg_vpi will set port_state to READY for SLI3.
2185 rc = lpfc_sli_issue_mbox(phba, clearlambox, 2512 */
2186 (MBX_NOWAIT | MBX_STOP_IOCB)); 2513 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2187 if (rc == MBX_NOT_FINISHED) { 2514 lpfc_issue_reg_vpi(phba, vport);
2188 mempool_free(clearlambox, phba->mbox_mem_pool); 2515 else { /* NPIV Not enabled */
2189 clrlaerr = 1; 2516 lpfc_issue_clear_la(phba, vport);
2190 break; 2517 vport->port_state = LPFC_VPORT_READY;
2191 } 2518 }
2192 2519
2193 /* Setup and issue mailbox INITIALIZE LINK command */ 2520 /* Setup and issue mailbox INITIALIZE LINK command */
2194 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2521 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2195 if (!initlinkmbox) { 2522 if (!initlinkmbox) {
2196 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2523 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2197 "%d:0206 Device Discovery " 2524 "%d (%d):0206 Device Discovery "
2198 "completion error\n", 2525 "completion error\n",
2199 phba->brd_no); 2526 phba->brd_no, vport->vpi);
2200 phba->link_state = LPFC_HBA_ERROR; 2527 phba->link_state = LPFC_HBA_ERROR;
2201 break; 2528 break;
2202 } 2529 }
@@ -2206,6 +2533,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2206 phba->cfg_link_speed); 2533 phba->cfg_link_speed);
2207 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 2534 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2208 initlinkmbox->vport = vport; 2535 initlinkmbox->vport = vport;
2536 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2209 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, 2537 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2210 (MBX_NOWAIT | MBX_STOP_IOCB)); 2538 (MBX_NOWAIT | MBX_STOP_IOCB));
2211 lpfc_set_loopback_flag(phba); 2539 lpfc_set_loopback_flag(phba);
@@ -2217,37 +2545,28 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2217 case LPFC_DISC_AUTH: 2545 case LPFC_DISC_AUTH:
2218 /* Node Authentication timeout */ 2546 /* Node Authentication timeout */
2219 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2547 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2220 "%d:0227 Node Authentication timeout\n", 2548 "%d (%d):0227 Node Authentication timeout\n",
2221 phba->brd_no); 2549 phba->brd_no, vport->vpi);
2222 lpfc_disc_flush_list(vport); 2550 lpfc_disc_flush_list(vport);
2223 2551
2224 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2552 /*
2225 if (!clearlambox) { 2553 * set port_state to PORT_READY if SLI2.
2226 clrlaerr = 1; 2554 * cmpl_reg_vpi will set port_state to READY for SLI3.
2227 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2555 */
2228 "%d:0207 Device Discovery " 2556 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2229 "completion error\n", 2557 lpfc_issue_reg_vpi(phba, vport);
2230 phba->brd_no); 2558 else { /* NPIV Not enabled */
2231 phba->link_state = LPFC_HBA_ERROR; 2559 lpfc_issue_clear_la(phba, vport);
2232 break; 2560 vport->port_state = LPFC_VPORT_READY;
2233 }
2234 phba->link_state = LPFC_CLEAR_LA;
2235 lpfc_clear_la(phba, clearlambox);
2236 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2237 clearlambox->vport = vport;
2238 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2239 (MBX_NOWAIT | MBX_STOP_IOCB));
2240 if (rc == MBX_NOT_FINISHED) {
2241 mempool_free(clearlambox, phba->mbox_mem_pool);
2242 clrlaerr = 1;
2243 } 2561 }
2244 break; 2562 break;
2245 2563
2246 case LPFC_VPORT_READY: 2564 case LPFC_VPORT_READY:
2247 if (vport->fc_flag & FC_RSCN_MODE) { 2565 if (vport->fc_flag & FC_RSCN_MODE) {
2248 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2566 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2249 "%d:0231 RSCN timeout Data: x%x x%x\n", 2567 "%d (%d):0231 RSCN timeout Data: x%x "
2250 phba->brd_no, 2568 "x%x\n",
2569 phba->brd_no, vport->vpi,
2251 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 2570 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2252 2571
2253 /* Cleanup any outstanding ELS commands */ 2572 /* Cleanup any outstanding ELS commands */
@@ -2258,23 +2577,21 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2258 } 2577 }
2259 break; 2578 break;
2260 2579
2261 case LPFC_STATE_UNKNOWN: 2580 default:
2262 case LPFC_NS_REG:
2263 case LPFC_BUILD_DISC_LIST:
2264 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2581 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2265 "%d:0229 Unexpected discovery timeout, vport " 2582 "%d (%d):0229 Unexpected discovery timeout, "
2266 "State x%x\n", 2583 "vport State x%x\n",
2267 vport->port_state, phba->brd_no); 2584 phba->brd_no, vport->vpi, vport->port_state);
2268 2585
2269 break; 2586 break;
2270 } 2587 }
2271 2588
2272 switch (phba->link_state) { 2589 switch (phba->link_state) {
2273 case LPFC_CLEAR_LA: 2590 case LPFC_CLEAR_LA:
2274 /* CLEAR LA timeout */ 2591 /* CLEAR LA timeout */
2275 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2592 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2276 "%d:0228 CLEAR LA timeout\n", 2593 "%d (%d):0228 CLEAR LA timeout\n",
2277 phba->brd_no); 2594 phba->brd_no, vport->vpi);
2278 clrlaerr = 1; 2595 clrlaerr = 1;
2279 break; 2596 break;
2280 2597
@@ -2286,11 +2603,14 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2286 case LPFC_LINK_UP: 2603 case LPFC_LINK_UP:
2287 case LPFC_HBA_ERROR: 2604 case LPFC_HBA_ERROR:
2288 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2605 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2289 "%d:0230 Unexpected timeout, hba link " 2606 "%d (%d):0230 Unexpected timeout, hba link "
2290 "state x%x\n", 2607 "state x%x\n",
2291 phba->brd_no, phba->link_state); 2608 phba->brd_no, vport->vpi, phba->link_state);
2292 clrlaerr = 1; 2609 clrlaerr = 1;
2293 break; 2610 break;
2611
2612 case LPFC_HBA_READY:
2613 break;
2294 } 2614 }
2295 2615
2296 if (clrlaerr) { 2616 if (clrlaerr) {
@@ -2374,7 +2694,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2374 2694
2375/* 2695/*
2376 * Search node lists for a remote port matching filter criteria 2696 * Search node lists for a remote port matching filter criteria
2377 * This routine is used when the caller does NOT have host_lock. 2697 * Caller needs to hold host_lock before calling this routine.
2378 */ 2698 */
2379struct lpfc_nodelist * 2699struct lpfc_nodelist *
2380lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) 2700lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
@@ -2427,11 +2747,41 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2427} 2747}
2428 2748
2429void 2749void
2750lpfc_dev_loss_delay(unsigned long ptr)
2751{
2752 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2753 struct lpfc_vport *vport = ndlp->vport;
2754 struct lpfc_hba *phba = vport->phba;
2755 struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
2756 unsigned long flags;
2757
2758 evtp = &ndlp->dev_loss_evt;
2759
2760 spin_lock_irqsave(&phba->hbalock, flags);
2761 if (!list_empty(&evtp->evt_listp)) {
2762 spin_unlock_irqrestore(&phba->hbalock, flags);
2763 return;
2764 }
2765
2766 evtp->evt_arg1 = ndlp;
2767 evtp->evt = LPFC_EVT_DEV_LOSS;
2768 list_add_tail(&evtp->evt_listp, &phba->work_list);
2769 if (phba->work_wait)
2770 lpfc_worker_wake_up(phba);
2771 spin_unlock_irqrestore(&phba->hbalock, flags);
2772 return;
2773}
2774
2775void
2430lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2776lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2431 uint32_t did) 2777 uint32_t did)
2432{ 2778{
2433 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 2779 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2434 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2780 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2781 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2782 init_timer(&ndlp->nlp_initiator_tmr);
2783 ndlp->nlp_initiator_tmr.function = lpfc_dev_loss_delay;
2784 ndlp->nlp_initiator_tmr.data = (unsigned long)ndlp;
2435 init_timer(&ndlp->nlp_delayfunc); 2785 init_timer(&ndlp->nlp_delayfunc);
2436 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 2786 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2437 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 2787 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 430416805e85..7fab93d34367 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -64,6 +64,7 @@
64#define SLI3_IOCB_CMD_SIZE 128 64#define SLI3_IOCB_CMD_SIZE 128
65#define SLI3_IOCB_RSP_SIZE 64 65#define SLI3_IOCB_RSP_SIZE 64
66 66
67
67/* Common Transport structures and definitions */ 68/* Common Transport structures and definitions */
68 69
69union CtRevisionId { 70union CtRevisionId {
@@ -84,6 +85,9 @@ union CtCommandResponse {
84 uint32_t word; 85 uint32_t word;
85}; 86};
86 87
88#define FC4_FEATURE_INIT 0x2
89#define FC4_FEATURE_TARGET 0x1
90
87struct lpfc_sli_ct_request { 91struct lpfc_sli_ct_request {
88 /* Structure is in Big Endian format */ 92 /* Structure is in Big Endian format */
89 union CtRevisionId RevisionId; 93 union CtRevisionId RevisionId;
@@ -126,20 +130,6 @@ struct lpfc_sli_ct_request {
126 130
127 uint32_t rsvd[7]; 131 uint32_t rsvd[7];
128 } rft; 132 } rft;
129 struct rff {
130 uint32_t PortId;
131 uint8_t reserved[2];
132#ifdef __BIG_ENDIAN_BITFIELD
133 uint8_t feature_res:6;
134 uint8_t feature_init:1;
135 uint8_t feature_tgt:1;
136#else /* __LITTLE_ENDIAN_BITFIELD */
137 uint8_t feature_tgt:1;
138 uint8_t feature_init:1;
139 uint8_t feature_res:6;
140#endif
141 uint8_t type_code; /* type=8 for FCP */
142 } rff;
143 struct rnn { 133 struct rnn {
144 uint32_t PortId; /* For RNN_ID requests */ 134 uint32_t PortId; /* For RNN_ID requests */
145 uint8_t wwnn[8]; 135 uint8_t wwnn[8];
@@ -149,15 +139,42 @@ struct lpfc_sli_ct_request {
149 uint8_t len; 139 uint8_t len;
150 uint8_t symbname[255]; 140 uint8_t symbname[255];
151 } rsnn; 141 } rsnn;
142 struct rspn { /* For RSPN_ID requests */
143 uint32_t PortId;
144 uint8_t len;
145 uint8_t symbname[255];
146 } rspn;
147 struct gff {
148 uint32_t PortId;
149 } gff;
150 struct gff_acc {
151 uint8_t fbits[128];
152 } gff_acc;
153#define FCP_TYPE_FEATURE_OFFSET 4
154 struct rff {
155 uint32_t PortId;
156 uint8_t reserved[2];
157 uint8_t fbits;
158 uint8_t type_code; /* type=8 for FCP */
159 } rff;
152 } un; 160 } un;
153}; 161};
154 162
155#define SLI_CT_REVISION 1 163#define SLI_CT_REVISION 1
156#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260) 164#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
157#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228) 165 sizeof(struct gid))
158#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235) 166#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
159#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252) 167 sizeof(struct gff))
160#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request)) 168#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
169 sizeof(struct rft))
170#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
171 sizeof(struct rff))
172#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
173 sizeof(struct rnn))
174#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
175 sizeof(struct rsnn))
176#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
177 sizeof(struct rspn))
161 178
162/* 179/*
163 * FsType Definitions 180 * FsType Definitions
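
The request-size macros above switch from "sizeof(whole struct) minus a magic constant" to "offsetof of the union plus sizeof the one member actually sent", which keeps the sizes correct even when new union members are added. A standalone illustration of the pattern with hypothetical example_* types (not the driver's real layout):

#include <stddef.h>
#include <stdint.h>

struct example_gid  { uint32_t port_id; };
struct example_rsnn { uint32_t port_id; uint8_t len; uint8_t name[255]; };

struct example_ct_req {
	uint32_t preamble[4];			/* fixed CT header */
	union {
		struct example_gid  gid;
		struct example_rsnn rsnn;
	} un;
};

/* Header length plus exactly one payload member -- no magic constants. */
#define EXAMPLE_GID_REQUEST_SZ	(offsetof(struct example_ct_req, un) + \
				 sizeof(struct example_gid))
#define EXAMPLE_RSNN_REQUEST_SZ	(offsetof(struct example_ct_req, un) + \
				 sizeof(struct example_rsnn))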
@@ -232,6 +249,7 @@ struct lpfc_sli_ct_request {
232#define SLI_CTNS_GFT_ID 0x0117 249#define SLI_CTNS_GFT_ID 0x0117
233#define SLI_CTNS_GSPN_ID 0x0118 250#define SLI_CTNS_GSPN_ID 0x0118
234#define SLI_CTNS_GPT_ID 0x011A 251#define SLI_CTNS_GPT_ID 0x011A
252#define SLI_CTNS_GFF_ID 0x011F
235#define SLI_CTNS_GID_PN 0x0121 253#define SLI_CTNS_GID_PN 0x0121
236#define SLI_CTNS_GID_NN 0x0131 254#define SLI_CTNS_GID_NN 0x0131
237#define SLI_CTNS_GIP_NN 0x0135 255#define SLI_CTNS_GIP_NN 0x0135
@@ -245,9 +263,9 @@ struct lpfc_sli_ct_request {
245#define SLI_CTNS_RNN_ID 0x0213 263#define SLI_CTNS_RNN_ID 0x0213
246#define SLI_CTNS_RCS_ID 0x0214 264#define SLI_CTNS_RCS_ID 0x0214
247#define SLI_CTNS_RFT_ID 0x0217 265#define SLI_CTNS_RFT_ID 0x0217
248#define SLI_CTNS_RFF_ID 0x021F
249#define SLI_CTNS_RSPN_ID 0x0218 266#define SLI_CTNS_RSPN_ID 0x0218
250#define SLI_CTNS_RPT_ID 0x021A 267#define SLI_CTNS_RPT_ID 0x021A
268#define SLI_CTNS_RFF_ID 0x021F
251#define SLI_CTNS_RIP_NN 0x0235 269#define SLI_CTNS_RIP_NN 0x0235
252#define SLI_CTNS_RIPA_NN 0x0236 270#define SLI_CTNS_RIPA_NN 0x0236
253#define SLI_CTNS_RSNN_NN 0x0239 271#define SLI_CTNS_RSNN_NN 0x0239
@@ -316,8 +334,9 @@ struct csp {
316 uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ 334 uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
317 335
318#ifdef __BIG_ENDIAN_BITFIELD 336#ifdef __BIG_ENDIAN_BITFIELD
319 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ 337 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
320 uint16_t response_multiple_Nport:1; /* FC Word 1, bit 29 */ 338 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
339 uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
321 uint16_t fPort:1; /* FC Word 1, bit 28 */ 340 uint16_t fPort:1; /* FC Word 1, bit 28 */
322 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ 341 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
323 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ 342 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
@@ -336,9 +355,9 @@ struct csp {
336 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ 355 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
337 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ 356 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
338 uint16_t fPort:1; /* FC Word 1, bit 28 */ 357 uint16_t fPort:1; /* FC Word 1, bit 28 */
339 uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */ 358 uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
340 uint16_t randomOffset:1; /* FC Word 1, bit 30 */ 359 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
341 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ 360 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
342 361
343 uint16_t payloadlength:1; /* FC Word 1, bit 16 */ 362 uint16_t payloadlength:1; /* FC Word 1, bit 16 */
344 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ 363 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
@@ -1268,6 +1287,10 @@ typedef struct { /* FireFly BIU registers */
1268#define MBX_READ_RPI64 0x8F 1287#define MBX_READ_RPI64 0x8F
1269#define MBX_REG_LOGIN64 0x93 1288#define MBX_REG_LOGIN64 0x93
1270#define MBX_READ_LA64 0x95 1289#define MBX_READ_LA64 0x95
1290#define MBX_REG_VPI 0x96
1291#define MBX_UNREG_VPI 0x97
1292#define MBX_REG_VNPID 0x96
1293#define MBX_UNREG_VNPID 0x97
1271 1294
1272#define MBX_FLASH_WR_ULA 0x98 1295#define MBX_FLASH_WR_ULA 0x98
1273#define MBX_SET_DEBUG 0x99 1296#define MBX_SET_DEBUG 0x99
@@ -1570,7 +1593,7 @@ typedef struct {
1570#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */ 1593#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
1571#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */ 1594#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
1572#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */ 1595#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
1573#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */ 1596#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
1574#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */ 1597#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
1575 1598
1576#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ 1599#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
@@ -2086,6 +2109,45 @@ typedef struct {
2086#endif 2109#endif
2087} UNREG_LOGIN_VAR; 2110} UNREG_LOGIN_VAR;
2088 2111
2112/* Structure for MB Command REG_VPI (0x96) */
2113typedef struct {
2114#ifdef __BIG_ENDIAN_BITFIELD
2115 uint32_t rsvd1;
2116 uint32_t rsvd2:8;
2117 uint32_t sid:24;
2118 uint32_t rsvd3;
2119 uint32_t rsvd4;
2120 uint32_t rsvd5;
2121 uint16_t rsvd6;
2122 uint16_t vpi;
2123#else /* __LITTLE_ENDIAN */
2124 uint32_t rsvd1;
2125 uint32_t sid:24;
2126 uint32_t rsvd2:8;
2127 uint32_t rsvd3;
2128 uint32_t rsvd4;
2129 uint32_t rsvd5;
2130 uint16_t vpi;
2131 uint16_t rsvd6;
2132#endif
2133} REG_VPI_VAR;
2134
2135/* Structure for MB Command UNREG_VPI (0x97) */
2136typedef struct {
2137 uint32_t rsvd1;
2138 uint32_t rsvd2;
2139 uint32_t rsvd3;
2140 uint32_t rsvd4;
2141 uint32_t rsvd5;
2142#ifdef __BIG_ENDIAN_BITFIELD
2143 uint16_t rsvd6;
2144 uint16_t vpi;
2145#else /* __LITTLE_ENDIAN */
2146 uint16_t vpi;
2147 uint16_t rsvd6;
2148#endif
2149} UNREG_VPI_VAR;
2150
2089/* Structure for MB Command UNREG_D_ID (0x23) */ 2151/* Structure for MB Command UNREG_D_ID (0x23) */
2090 2152
2091typedef struct { 2153typedef struct {
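
The REG_VPI_VAR structure above is the payload that lpfc_reg_vpi() (added in lpfc_mbox.c by this patch, not shown in this excerpt) fills before lpfc_issue_reg_vpi() sends it. A hedged sketch of what that population would look like; the field assignments and memset are illustrative assumptions, not the driver's actual helper:

/*
 * Illustrative only: building a REG_VPI (0x96) mailbox payload.
 * Assumes the lpfc driver headers for MAILBOX_t, LPFC_MBOXQ_t, OWN_HOST.
 */
static void example_build_reg_vpi(LPFC_MBOXQ_t *mbox, uint16_t vpi,
				  uint32_t sid)
{
	MAILBOX_t *mb = &mbox->mb;

	memset(mb, 0, sizeof(MAILBOX_t));
	mb->mbxCommand = MBX_REG_VPI;		/* 0x96, defined above */
	mb->un.varRegVpi.vpi = vpi;		/* virtual port index */
	mb->un.varRegVpi.sid = sid;		/* 24-bit S_ID (fc_myDID) */
	mb->mbxOwner = OWN_HOST;
}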
@@ -2549,8 +2611,8 @@ typedef union {
2549 LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */ 2611 LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
2550 READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */ 2612 READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
2551 WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */ 2613 WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
2552 BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */ 2614 BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
2553 INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */ 2615 INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
2554 DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */ 2616 DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
2555 CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */ 2617 CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
2556 PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */ 2618 PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
@@ -2575,6 +2637,8 @@ typedef union {
2575 */ 2637 */
2576 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ 2638 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
2577 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ 2639 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
2640 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
2641 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
2578} MAILVARIANTS; 2642} MAILVARIANTS;
2579 2643
2580/* 2644/*
@@ -2614,7 +2678,6 @@ typedef union {
2614 struct sli3_pgp s3_pgp; 2678 struct sli3_pgp s3_pgp;
2615} SLI_VAR; 2679} SLI_VAR;
2616 2680
2617
2618typedef struct { 2681typedef struct {
2619#ifdef __BIG_ENDIAN_BITFIELD 2682#ifdef __BIG_ENDIAN_BITFIELD
2620 uint16_t mbxStatus; 2683 uint16_t mbxStatus;
@@ -2935,6 +2998,8 @@ struct rcv_sli3 {
2935 struct ulp_bde64 bde2; 2998 struct ulp_bde64 bde2;
2936}; 2999};
2937 3000
3001
3002
2938typedef struct _IOCB { /* IOCB structure */ 3003typedef struct _IOCB { /* IOCB structure */
2939 union { 3004 union {
2940 GENERIC_RSP grsp; /* Generic response */ 3005 GENERIC_RSP grsp; /* Generic response */
@@ -3011,6 +3076,7 @@ typedef struct _IOCB { /* IOCB structure */
3011 uint32_t ulpXS:1; 3076 uint32_t ulpXS:1;
3012 uint32_t ulpTimeout:8; 3077 uint32_t ulpTimeout:8;
3013#endif 3078#endif
3079
3014 union { 3080 union {
3015 struct rcv_sli3 rcvsli3; /* words 8 - 15 */ 3081 struct rcv_sli3 rcvsli3; /* words 8 - 15 */
3016 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */ 3082 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
@@ -3024,6 +3090,7 @@ typedef struct _IOCB { /* IOCB structure */
3024#define PARM_UNUSED 0 /* PU field (Word 4) not used */ 3090#define PARM_UNUSED 0 /* PU field (Word 4) not used */
3025#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */ 3091#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
3026#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */ 3092#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
3093#define PARM_NPIV_DID 3
3027#define CLASS1 0 /* Class 1 */ 3094#define CLASS1 0 /* Class 1 */
3028#define CLASS2 1 /* Class 2 */ 3095#define CLASS2 1 /* Class 2 */
3029#define CLASS3 2 /* Class 3 */ 3096#define CLASS3 2 /* Class 3 */
@@ -3044,7 +3111,7 @@ typedef struct _IOCB { /* IOCB structure */
3044#define IOSTAT_RSVD2 0xC 3111#define IOSTAT_RSVD2 0xC
3045#define IOSTAT_RSVD3 0xD 3112#define IOSTAT_RSVD3 0xD
3046#define IOSTAT_RSVD4 0xE 3113#define IOSTAT_RSVD4 0xE
3047#define IOSTAT_RSVD5 0xF 3114#define IOSTAT_NEED_BUFFER 0xF
3048#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */ 3115#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
3049#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */ 3116#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
3050#define IOSTAT_CNT 0x11 3117#define IOSTAT_CNT 0x11
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e50c5ad252f9..4dd0f1aa09e8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -27,6 +27,7 @@
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/ctype.h>
30 31
31#include <scsi/scsi.h> 32#include <scsi/scsi.h>
32#include <scsi/scsi_device.h> 33#include <scsi/scsi_device.h>
@@ -40,21 +41,18 @@
40#include "lpfc.h" 41#include "lpfc.h"
41#include "lpfc_logmsg.h" 42#include "lpfc_logmsg.h"
42#include "lpfc_crtn.h" 43#include "lpfc_crtn.h"
44#include "lpfc_vport.h"
43#include "lpfc_version.h" 45#include "lpfc_version.h"
46#include "lpfc_vport.h"
44 47
45static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 48static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
46static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 49static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
47static int lpfc_post_rcv_buf(struct lpfc_hba *); 50static int lpfc_post_rcv_buf(struct lpfc_hba *);
48 51
49static struct scsi_transport_template *lpfc_transport_template = NULL; 52static struct scsi_transport_template *lpfc_transport_template = NULL;
53static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
50static DEFINE_IDR(lpfc_hba_index); 54static DEFINE_IDR(lpfc_hba_index);
51 55
52int lpfc_sli_mode = 0;
53module_param(lpfc_sli_mode, int, 0);
54MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
55 " 0 - auto (SLI-3 if supported),"
56 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
57 " 3 - select SLI-3");
58 56
59 57
60/************************************************************************/ 58/************************************************************************/
@@ -123,6 +121,8 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
123 sizeof(phba->wwpn)); 121 sizeof(phba->wwpn));
124 } 122 }
125 123
124 phba->sli3_options = 0x0;
125
126 /* Setup and issue mailbox READ REV command */ 126 /* Setup and issue mailbox READ REV command */
127 lpfc_read_rev(phba, pmb); 127 lpfc_read_rev(phba, pmb);
128 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 128 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
@@ -136,6 +136,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
136 return -ERESTART; 136 return -ERESTART;
137 } 137 }
138 138
139
139 /* 140 /*
140 * The value of rr must be 1 since the driver set the cv field to 1. 141 * The value of rr must be 1 since the driver set the cv field to 1.
141 * This setting requires the FW to set all revision fields. 142 * This setting requires the FW to set all revision fields.
@@ -155,6 +156,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
155 156
156 /* Save information as VPD data */ 157 /* Save information as VPD data */
157 vp->rev.rBit = 1; 158 vp->rev.rBit = 1;
159 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
158 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 160 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
159 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 161 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
160 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 162 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
@@ -170,6 +172,13 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
170 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 172 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
171 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 173 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
172 174
 175 /* If the sli feature level is less than 9, we must

176 * tear down all RPIs and VPIs on link down if NPIV
177 * is enabled.
178 */
179 if (vp->rev.feaLevelHigh < 9)
180 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
181
173 if (lpfc_is_LC_HBA(phba->pcidev->device)) 182 if (lpfc_is_LC_HBA(phba->pcidev->device))
174 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 183 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
175 sizeof (phba->RandomData)); 184 sizeof (phba->RandomData));
@@ -197,7 +206,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
197 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 206 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
198 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 207 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
199 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 208 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
200 mb->un.varDmp.word_cnt); 209 mb->un.varDmp.word_cnt);
201 offset += mb->un.varDmp.word_cnt; 210 offset += mb->un.varDmp.word_cnt;
202 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 211 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
203 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 212 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
@@ -240,7 +249,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
240 mb = &pmb->mb; 249 mb = &pmb->mb;
241 250
242 /* Get login parameters for NID. */ 251 /* Get login parameters for NID. */
243 lpfc_read_sparam(phba, pmb); 252 lpfc_read_sparam(phba, pmb, 0);
244 pmb->vport = vport; 253 pmb->vport = vport;
245 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 254 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
246 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -431,10 +440,9 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
431 writel(0, phba->HCregaddr); 440 writel(0, phba->HCregaddr);
432 readl(phba->HCregaddr); /* flush */ 441 readl(phba->HCregaddr); /* flush */
433 442
434 /* Cleanup potential discovery resources */ 443 list_for_each_entry(vport, &phba->port_list, listentry) {
435 lpfc_els_flush_rscn(vport); 444 lpfc_cleanup_discovery_resources(vport);
436 lpfc_els_flush_cmd(vport); 445 }
437 lpfc_disc_flush_list(vport);
438 446
439 return 0; 447 return 0;
440} 448}
@@ -456,13 +464,17 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
456 struct lpfc_dmabuf *mp, *next_mp; 464 struct lpfc_dmabuf *mp, *next_mp;
457 int i; 465 int i;
458 466
459 /* Cleanup preposted buffers on the ELS ring */ 467 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
460 pring = &psli->ring[LPFC_ELS_RING]; 468 lpfc_sli_hbqbuf_free_all(phba);
461 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 469 else {
462 list_del(&mp->list); 470 /* Cleanup preposted buffers on the ELS ring */
463 pring->postbufq_cnt--; 471 pring = &psli->ring[LPFC_ELS_RING];
464 lpfc_mbuf_free(phba, mp->virt, mp->phys); 472 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
465 kfree(mp); 473 list_del(&mp->list);
474 pring->postbufq_cnt--;
475 lpfc_mbuf_free(phba, mp->virt, mp->phys);
476 kfree(mp);
477 }
466 } 478 }
467 479
468 for (i = 0; i < psli->num_rings; i++) { 480 for (i = 0; i < psli->num_rings; i++) {
@@ -485,10 +497,11 @@ void
485lpfc_handle_eratt(struct lpfc_hba *phba) 497lpfc_handle_eratt(struct lpfc_hba *phba)
486{ 498{
487 struct lpfc_vport *vport = phba->pport; 499 struct lpfc_vport *vport = phba->pport;
488 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
489 struct lpfc_sli *psli = &phba->sli; 500 struct lpfc_sli *psli = &phba->sli;
490 struct lpfc_sli_ring *pring; 501 struct lpfc_sli_ring *pring;
502 struct lpfc_vport *port_iterator;
491 uint32_t event_data; 503 uint32_t event_data;
504 struct Scsi_Host *shost;
492 505
493 /* If the pci channel is offline, ignore possible errors, 506 /* If the pci channel is offline, ignore possible errors,
494 * since we cannot communicate with the pci card anyway. */ 507 * since we cannot communicate with the pci card anyway. */
@@ -503,10 +516,17 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
503 "Data: x%x x%x x%x\n", 516 "Data: x%x x%x x%x\n",
504 phba->brd_no, phba->work_hs, 517 phba->brd_no, phba->work_hs,
505 phba->work_status[0], phba->work_status[1]); 518 phba->work_status[0], phba->work_status[1]);
506 spin_lock_irq(shost->host_lock); 519 list_for_each_entry(port_iterator, &phba->port_list,
507 vport->fc_flag |= FC_ESTABLISH_LINK; 520 listentry) {
521 shost = lpfc_shost_from_vport(port_iterator);
522
523 spin_lock_irq(shost->host_lock);
524 port_iterator->fc_flag |= FC_ESTABLISH_LINK;
525 spin_unlock_irq(shost->host_lock);
526 }
527 spin_lock_irq(&phba->hbalock);
508 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 528 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
509 spin_unlock_irq(shost->host_lock); 529 spin_unlock_irq(&phba->hbalock);
510 530
511 /* 531 /*
512 * Firmware stops when it triggled erratt with HS_FFER6. 532 * Firmware stops when it triggled erratt with HS_FFER6.
@@ -543,11 +563,14 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
543 phba->work_status[0], phba->work_status[1]); 563 phba->work_status[0], phba->work_status[1]);
544 564
545 event_data = FC_REG_DUMP_EVENT; 565 event_data = FC_REG_DUMP_EVENT;
566 shost = lpfc_shost_from_vport(vport);
546 fc_host_post_vendor_event(shost, fc_get_event_number(), 567 fc_host_post_vendor_event(shost, fc_get_event_number(),
547 sizeof(event_data), (char *) &event_data, 568 sizeof(event_data), (char *) &event_data,
548 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 569 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
549 570
571 spin_lock_irq(&phba->hbalock);
550 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 572 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
573 spin_unlock_irq(&phba->hbalock);
551 lpfc_offline_prep(phba); 574 lpfc_offline_prep(phba);
552 lpfc_offline(phba); 575 lpfc_offline(phba);
553 lpfc_unblock_mgmt_io(phba); 576 lpfc_unblock_mgmt_io(phba);
@@ -569,6 +592,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
569{ 592{
570 struct lpfc_vport *vport = phba->pport; 593 struct lpfc_vport *vport = phba->pport;
571 struct lpfc_sli *psli = &phba->sli; 594 struct lpfc_sli *psli = &phba->sli;
595 struct lpfc_vport *port_iterator;
572 LPFC_MBOXQ_t *pmb; 596 LPFC_MBOXQ_t *pmb;
573 volatile uint32_t control; 597 volatile uint32_t control;
574 struct lpfc_dmabuf *mp; 598 struct lpfc_dmabuf *mp;
@@ -589,7 +613,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
589 rc = -EIO; 613 rc = -EIO;
590 614
591 /* Cleanup any outstanding ELS commands */ 615 /* Cleanup any outstanding ELS commands */
592 lpfc_els_flush_cmd(vport); 616 list_for_each_entry(port_iterator, &phba->port_list, listentry)
617 lpfc_els_flush_cmd(port_iterator);
593 618
594 psli->slistat.link_event++; 619 psli->slistat.link_event++;
595 lpfc_read_la(phba, pmb, mp); 620 lpfc_read_la(phba, pmb, mp);
@@ -1023,9 +1048,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
1023 return cnt; 1048 return cnt;
1024 } 1049 }
1025 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1050 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1026 if (mp2) { 1051 if (mp2)
1027 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1052 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1028 }
1029 } 1053 }
1030 pring->missbufcnt = 0; 1054 pring->missbufcnt = 0;
1031 return 0; 1055 return 0;
@@ -1175,34 +1199,45 @@ lpfc_cleanup(struct lpfc_vport *vport)
1175static void 1199static void
1176lpfc_establish_link_tmo(unsigned long ptr) 1200lpfc_establish_link_tmo(unsigned long ptr)
1177{ 1201{
1178 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 1202 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
1179 struct lpfc_vport *vport = phba->pport; 1203 struct lpfc_vport *vport = phba->pport;
1180 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1181 unsigned long iflag; 1204 unsigned long iflag;
1182 1205
1183
1184 /* Re-establishing Link, timer expired */ 1206 /* Re-establishing Link, timer expired */
1185 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1207 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1186 "%d:1300 Re-establishing Link, timer expired " 1208 "%d:1300 Re-establishing Link, timer expired "
1187 "Data: x%x x%x\n", 1209 "Data: x%x x%x\n",
1188 phba->brd_no, vport->fc_flag, 1210 phba->brd_no, vport->fc_flag,
1189 vport->port_state); 1211 vport->port_state);
1190 spin_lock_irqsave(shost->host_lock, iflag); 1212 list_for_each_entry(vport, &phba->port_list, listentry) {
1191 vport->fc_flag &= ~FC_ESTABLISH_LINK; 1213 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1192 spin_unlock_irqrestore(shost->host_lock, iflag); 1214
1215 spin_lock_irqsave(shost->host_lock, iflag);
1216 vport->fc_flag &= ~FC_ESTABLISH_LINK;
1217 spin_unlock_irqrestore(shost->host_lock, iflag);
1218 }
1219}
1220
1221void
1222lpfc_stop_vport_timers(struct lpfc_vport *vport)
1223{
1224 del_timer_sync(&vport->els_tmofunc);
1225 del_timer_sync(&vport->fc_fdmitmo);
1226 lpfc_can_disctmo(vport);
1227 return;
1193} 1228}
1194 1229
1195static void 1230static void
1196lpfc_stop_timer(struct lpfc_hba *phba) 1231lpfc_stop_phba_timers(struct lpfc_hba *phba)
1197{ 1232{
1198 struct lpfc_vport *vport = phba->pport; 1233 struct lpfc_vport *vport;
1199 1234
1200 del_timer_sync(&phba->fcp_poll_timer); 1235 del_timer_sync(&phba->fcp_poll_timer);
1201 del_timer_sync(&phba->fc_estabtmo); 1236 del_timer_sync(&phba->fc_estabtmo);
1202 del_timer_sync(&vport->els_tmofunc); 1237 list_for_each_entry(vport, &phba->port_list, listentry)
1203 del_timer_sync(&vport->fc_fdmitmo); 1238 lpfc_stop_vport_timers(vport);
1204 del_timer_sync(&vport->fc_disctmo);
1205 del_timer_sync(&phba->sli.mbox_tmo); 1239 del_timer_sync(&phba->sli.mbox_tmo);
1240 del_timer_sync(&phba->fabric_block_timer);
1206 return; 1241 return;
1207} 1242}
1208 1243
@@ -1210,7 +1245,6 @@ int
1210lpfc_online(struct lpfc_hba *phba) 1245lpfc_online(struct lpfc_hba *phba)
1211{ 1246{
1212 struct lpfc_vport *vport = phba->pport; 1247 struct lpfc_vport *vport = phba->pport;
1213 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1214 1248
1215 if (!phba) 1249 if (!phba)
1216 return 0; 1250 return 0;
@@ -1234,9 +1268,14 @@ lpfc_online(struct lpfc_hba *phba)
1234 return 1; 1268 return 1;
1235 } 1269 }
1236 1270
1237 spin_lock_irq(shost->host_lock); 1271 list_for_each_entry(vport, &phba->port_list, listentry) {
1238 vport->fc_flag &= ~FC_OFFLINE_MODE; 1272 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1239 spin_unlock_irq(shost->host_lock); 1273 spin_lock_irq(shost->host_lock);
1274 vport->fc_flag &= ~FC_OFFLINE_MODE;
1275 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
1276 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1277 spin_unlock_irq(shost->host_lock);
1278 }
1240 1279
1241 lpfc_unblock_mgmt_io(phba); 1280 lpfc_unblock_mgmt_io(phba);
1242 return 0; 1281 return 0;
@@ -1288,31 +1327,37 @@ lpfc_offline(struct lpfc_hba *phba)
1288{ 1327{
1289 struct lpfc_vport *vport = phba->pport; 1328 struct lpfc_vport *vport = phba->pport;
1290 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1329 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1291 unsigned long iflag; 1330 struct lpfc_vport *port_iterator;
1292 1331
1293 if (vport->fc_flag & FC_OFFLINE_MODE) 1332 if (vport->fc_flag & FC_OFFLINE_MODE)
1294 return; 1333 return;
1295 1334
1296 /* stop all timers associated with this hba */ 1335 /* stop all timers associated with this hba */
1297 lpfc_stop_timer(phba); 1336 lpfc_stop_phba_timers(phba);
1337 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
1338 port_iterator->work_port_events = 0;
1339 }
1298 1340
1299 lpfc_printf_log(phba, 1341 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1300 KERN_WARNING,
1301 LOG_INIT,
1302 "%d:0460 Bring Adapter offline\n", 1342 "%d:0460 Bring Adapter offline\n",
1303 phba->brd_no); 1343 phba->brd_no);
1304 1344
1305 /* Bring down the SLI Layer and cleanup. The HBA is offline 1345 /* Bring down the SLI Layer and cleanup. The HBA is offline
1306 now. */ 1346 now. */
1307 lpfc_sli_hba_down(phba); 1347 lpfc_sli_hba_down(phba);
1308 lpfc_cleanup(vport); 1348 spin_lock_irq(&phba->hbalock);
1309 spin_lock_irqsave(shost->host_lock, iflag);
1310 spin_lock(&phba->hbalock);
1311 phba->work_ha = 0; 1349 phba->work_ha = 0;
1312 vport->work_port_events = 0;
1313 vport->fc_flag |= FC_OFFLINE_MODE; 1350 vport->fc_flag |= FC_OFFLINE_MODE;
1314 spin_unlock(&phba->hbalock); 1351 spin_unlock_irq(&phba->hbalock);
1315 spin_unlock_irqrestore(shost->host_lock, iflag); 1352 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
1353 shost = lpfc_shost_from_vport(port_iterator);
1354
1355 lpfc_cleanup(port_iterator);
1356 spin_lock_irq(shost->host_lock);
1357 vport->work_port_events = 0;
1358 vport->fc_flag |= FC_OFFLINE_MODE;
1359 spin_unlock_irq(shost->host_lock);
1360 }
1316} 1361}
1317 1362
1318/****************************************************************************** 1363/******************************************************************************
@@ -1332,7 +1377,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
1332 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1377 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1333 list_del(&sb->list); 1378 list_del(&sb->list);
1334 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1379 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1335 sb->dma_handle); 1380 sb->dma_handle);
1336 kfree(sb); 1381 kfree(sb);
1337 phba->total_scsi_bufs--; 1382 phba->total_scsi_bufs--;
1338 } 1383 }
@@ -1349,8 +1394,9 @@ lpfc_scsi_free(struct lpfc_hba *phba)
1349 return 0; 1394 return 0;
1350} 1395}
1351 1396
1397
1352struct lpfc_vport * 1398struct lpfc_vport *
1353lpfc_create_port(struct lpfc_hba *phba, int instance) 1399lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
1354{ 1400{
1355 struct lpfc_vport *vport; 1401 struct lpfc_vport *vport;
1356 struct Scsi_Host *shost; 1402 struct Scsi_Host *shost;
@@ -1364,6 +1410,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance)
1364 vport->phba = phba; 1410 vport->phba = phba;
1365 1411
1366 vport->load_flag |= FC_LOADING; 1412 vport->load_flag |= FC_LOADING;
1413 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1367 1414
1368 shost->unique_id = instance; 1415 shost->unique_id = instance;
1369 shost->max_id = LPFC_MAX_TARGET; 1416 shost->max_id = LPFC_MAX_TARGET;
@@ -1376,7 +1423,13 @@ lpfc_create_port(struct lpfc_hba *phba, int instance)
1376 * max xri value determined in hba setup. 1423 * max xri value determined in hba setup.
1377 */ 1424 */
1378 shost->can_queue = phba->cfg_hba_queue_depth - 10; 1425 shost->can_queue = phba->cfg_hba_queue_depth - 10;
1379 shost->transportt = lpfc_transport_template; 1426 if (fc_vport != NULL) {
1427 shost->transportt = lpfc_vport_transport_template;
1428 vport->port_type = LPFC_NPIV_PORT;
1429 } else {
1430 shost->transportt = lpfc_transport_template;
1431 vport->port_type = LPFC_PHYSICAL_PORT;
1432 }
1380 1433
1381 /* Initialize all internally managed lists. */ 1434 /* Initialize all internally managed lists. */
1382 INIT_LIST_HEAD(&vport->fc_nodes); 1435 INIT_LIST_HEAD(&vport->fc_nodes);
@@ -1384,22 +1437,28 @@ lpfc_create_port(struct lpfc_hba *phba, int instance)
1384 1437
1385 init_timer(&vport->fc_disctmo); 1438 init_timer(&vport->fc_disctmo);
1386 vport->fc_disctmo.function = lpfc_disc_timeout; 1439 vport->fc_disctmo.function = lpfc_disc_timeout;
1387 vport->fc_disctmo.data = (unsigned long) vport; 1440 vport->fc_disctmo.data = (unsigned long)vport;
1388 1441
1389 init_timer(&vport->fc_fdmitmo); 1442 init_timer(&vport->fc_fdmitmo);
1390 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 1443 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
1391 vport->fc_fdmitmo.data = (unsigned long) vport; 1444 vport->fc_fdmitmo.data = (unsigned long)vport;
1392 1445
1393 init_timer(&vport->els_tmofunc); 1446 init_timer(&vport->els_tmofunc);
1394 vport->els_tmofunc.function = lpfc_els_timeout; 1447 vport->els_tmofunc.function = lpfc_els_timeout;
1395 vport->els_tmofunc.data = (unsigned long) vport; 1448 vport->els_tmofunc.data = (unsigned long)vport;
1396 1449
1397 error = scsi_add_host(shost, &phba->pcidev->dev); 1450 if (fc_vport != NULL) {
1451 error = scsi_add_host(shost, &fc_vport->dev);
1452 } else {
1453 error = scsi_add_host(shost, &phba->pcidev->dev);
1454 }
1398 if (error) 1455 if (error)
1399 goto out_put_shost; 1456 goto out_put_shost;
1400 1457
1458 if (!shost->shost_classdev.kobj.dentry)
1459 goto out_put_shost;
1460
1401 list_add_tail(&vport->listentry, &phba->port_list); 1461 list_add_tail(&vport->listentry, &phba->port_list);
1402 scsi_scan_host(shost);
1403 return vport; 1462 return vport;
1404 1463
1405out_put_shost: 1464out_put_shost:
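
The reworked lpfc_create_port() above now takes an fc_vport argument: when it is non-NULL the new Scsi_Host is parented to the fc_vport device and gets the vport transport template, otherwise it behaves as the physical-port path did before. A hedged usage sketch of how the NPIV create path in lpfc_vport.c (not shown in this excerpt) would be expected to call it; the error codes and the dd_data back-pointer are assumptions:

/*
 * Sketch only; assumes the lpfc driver headers and scsi_transport_fc.h.
 */
static int example_vport_create(struct fc_vport *fc_vport)
{
	struct Scsi_Host *shost = fc_vport->shost;	/* physical port's host */
	struct lpfc_vport *phys = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = phys->phba;
	struct lpfc_vport *new_vport;
	int instance;

	instance = lpfc_get_instance();
	if (instance < 0)
		return -ENOMEM;

	new_vport = lpfc_create_port(phba, instance, fc_vport);
	if (!new_vport)
		return -ENOMEM;

	fc_vport->dd_data = new_vport;	/* assumption: back-pointer kept here */
	return 0;
}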
@@ -1411,19 +1470,40 @@ out:
1411void 1470void
1412destroy_port(struct lpfc_vport *vport) 1471destroy_port(struct lpfc_vport *vport)
1413{ 1472{
1414 lpfc_cleanup(vport); 1473 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1415 list_del(&vport->listentry); 1474 struct lpfc_hba *phba = vport->phba;
1475
1476 kfree(vport->vname);
1416 lpfc_free_sysfs_attr(vport); 1477 lpfc_free_sysfs_attr(vport);
1417 fc_remove_host(lpfc_shost_from_vport(vport)); 1478
1418 scsi_remove_host(lpfc_shost_from_vport(vport)); 1479 fc_remove_host(shost);
1480 scsi_remove_host(shost);
1481
1482 spin_lock_irq(&phba->hbalock);
1483 list_del_init(&vport->listentry);
1484 spin_unlock_irq(&phba->hbalock);
1485
1486 lpfc_cleanup(vport);
1419 return; 1487 return;
1420} 1488}
1421 1489
1490int
1491lpfc_get_instance(void)
1492{
1493 int instance = 0;
1494
1495 /* Assign an unused number */
1496 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
1497 return -1;
1498 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
1499 return -1;
1500 return instance;
1501}
1502
1422static void 1503static void
1423lpfc_remove_device(struct lpfc_vport *vport) 1504lpfc_remove_device(struct lpfc_vport *vport)
1424{ 1505{
1425 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1506 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1426 struct lpfc_hba *phba = vport->phba;
1427 1507
1428 lpfc_free_sysfs_attr(vport); 1508 lpfc_free_sysfs_attr(vport);
1429 1509
@@ -1433,8 +1513,6 @@ lpfc_remove_device(struct lpfc_vport *vport)
1433 1513
1434 fc_remove_host(shost); 1514 fc_remove_host(shost);
1435 scsi_remove_host(shost); 1515 scsi_remove_host(shost);
1436
1437 kthread_stop(phba->worker_thread);
1438} 1516}
1439 1517
1440void lpfc_scan_start(struct Scsi_Host *shost) 1518void lpfc_scan_start(struct Scsi_Host *shost)
@@ -1442,7 +1520,7 @@ void lpfc_scan_start(struct Scsi_Host *shost)
1442 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1520 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1443 struct lpfc_hba *phba = vport->phba; 1521 struct lpfc_hba *phba = vport->phba;
1444 1522
1445 if (lpfc_alloc_sysfs_attr(vport)) 1523 if (lpfc_sli_hba_setup(phba))
1446 goto error; 1524 goto error;
1447 1525
1448 /* 1526 /*
@@ -1486,6 +1564,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
1486 return 0; 1564 return 0;
1487 1565
1488finished: 1566finished:
1567 lpfc_host_attrib_init(shost);
1568 return 1;
1569}
1570
1571void lpfc_host_attrib_init(struct Scsi_Host *shost)
1572{
1573 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1574 struct lpfc_hba *phba = vport->phba;
1489 /* 1575 /*
1490 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 1576 * Set fixed host attributes. Must done after lpfc_sli_hba_setup().
1491 */ 1577 */
@@ -1499,7 +1585,8 @@ finished:
1499 fc_host_supported_fc4s(shost)[2] = 1; 1585 fc_host_supported_fc4s(shost)[2] = 1;
1500 fc_host_supported_fc4s(shost)[7] = 1; 1586 fc_host_supported_fc4s(shost)[7] = 1;
1501 1587
1502 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost)); 1588 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
1589 sizeof fc_host_symbolic_name(shost));
1503 1590
1504 fc_host_supported_speeds(shost) = 0; 1591 fc_host_supported_speeds(shost) = 0;
1505 if (phba->lmt & LMT_10Gb) 1592 if (phba->lmt & LMT_10Gb)
@@ -1521,11 +1608,10 @@ finished:
1521 fc_host_active_fc4s(shost)[2] = 1; 1608 fc_host_active_fc4s(shost)[2] = 1;
1522 fc_host_active_fc4s(shost)[7] = 1; 1609 fc_host_active_fc4s(shost)[7] = 1;
1523 1610
1611 fc_host_max_npiv_vports(shost) = phba->max_vpi;
1524 spin_lock_irq(shost->host_lock); 1612 spin_lock_irq(shost->host_lock);
1525 vport->fc_flag &= ~FC_LOADING; 1613 vport->fc_flag &= ~FC_LOADING;
1526 spin_unlock_irq(shost->host_lock); 1614 spin_unlock_irq(shost->host_lock);
1527
1528 return 1;
1529} 1615}
1530 1616
1531static int __devinit 1617static int __devinit
@@ -1555,20 +1641,17 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1555 phba->pcidev = pdev; 1641 phba->pcidev = pdev;
1556 1642
1557 /* Assign an unused board number */ 1643 /* Assign an unused board number */
1558 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1644 if ((phba->brd_no = lpfc_get_instance()) < 0)
1559 goto out_free_phba;
1560
1561 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
1562 if (error)
1563 goto out_free_phba; 1645 goto out_free_phba;
1564 1646
1565 INIT_LIST_HEAD(&phba->port_list); 1647 INIT_LIST_HEAD(&phba->port_list);
1566 1648 INIT_LIST_HEAD(&phba->hbq_buffer_list);
1567 /* 1649 /*
1568 * Get all the module params for configuring this host and then 1650 * Get all the module params for configuring this host and then
1569 * establish the host. 1651 * establish the host.
1570 */ 1652 */
1571 lpfc_get_cfgparam(phba); 1653 lpfc_get_cfgparam(phba);
1654 phba->max_vpi = LPFC_MAX_VPI;
1572 1655
1573 /* Initialize timers used by driver */ 1656 /* Initialize timers used by driver */
1574 init_timer(&phba->fc_estabtmo); 1657 init_timer(&phba->fc_estabtmo);
@@ -1581,6 +1664,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1581 init_timer(&phba->fcp_poll_timer); 1664 init_timer(&phba->fcp_poll_timer);
1582 phba->fcp_poll_timer.function = lpfc_poll_timeout; 1665 phba->fcp_poll_timer.function = lpfc_poll_timeout;
1583 phba->fcp_poll_timer.data = (unsigned long) phba; 1666 phba->fcp_poll_timer.data = (unsigned long) phba;
1667 init_timer(&phba->fabric_block_timer);
1668 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
1669 phba->fabric_block_timer.data = (unsigned long) phba;
1584 1670
1585 pci_set_master(pdev); 1671 pci_set_master(pdev);
1586 retval = pci_set_mwi(pdev); 1672 retval = pci_set_mwi(pdev);
@@ -1696,15 +1782,17 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1696 spin_lock_init(&phba->scsi_buf_list_lock); 1782 spin_lock_init(&phba->scsi_buf_list_lock);
1697 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 1783 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
1698 1784
1699 vport = lpfc_create_port(phba, phba->brd_no); 1785 /* Initialize list of fabric iocbs */
1786 INIT_LIST_HEAD(&phba->fabric_iocb_list);
1787
1788 vport = lpfc_create_port(phba, phba->brd_no, NULL);
1700 if (!vport) 1789 if (!vport)
1701 goto out_kthread_stop; 1790 goto out_kthread_stop;
1702 1791
1703 shost = lpfc_shost_from_vport(vport); 1792 shost = lpfc_shost_from_vport(vport);
1704 vport->port_type = LPFC_PHYSICAL_PORT;
1705 phba->pport = vport; 1793 phba->pport = vport;
1706 1794
1707 pci_set_drvdata(pdev, lpfc_shost_from_vport(vport)); 1795 pci_set_drvdata(pdev, shost);
1708 1796
1709 if (phba->cfg_use_msi) { 1797 if (phba->cfg_use_msi) {
1710 error = pci_enable_msi(phba->pcidev); 1798 error = pci_enable_msi(phba->pcidev);
@@ -1720,7 +1808,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1721 "%d:0451 Enable interrupt handler failed\n", 1809 "%d:0451 Enable interrupt handler failed\n",
1722 phba->brd_no); 1810 phba->brd_no);
1723 goto out_destroy_port; 1811 goto out_disable_msi;
1724 } 1812 }
1725 1813
1726 phba->MBslimaddr = phba->slim_memmap_p; 1814 phba->MBslimaddr = phba->slim_memmap_p;
@@ -1729,10 +1817,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1729 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 1817 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1730 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 1818 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1731 1819
1732 error = lpfc_sli_hba_setup(phba); 1820 if (lpfc_alloc_sysfs_attr(vport))
1733 if (error)
1734 goto out_free_irq; 1821 goto out_free_irq;
1735 1822
1823 scsi_scan_host(shost);
1736 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1824 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1737 spin_lock_irq(shost->host_lock); 1825 spin_lock_irq(shost->host_lock);
1738 lpfc_poll_start_timer(phba); 1826 lpfc_poll_start_timer(phba);
@@ -1742,11 +1830,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1742 return 0; 1830 return 0;
1743 1831
1744out_free_irq: 1832out_free_irq:
1745 lpfc_stop_timer(phba); 1833 lpfc_stop_phba_timers(phba);
1746 phba->pport->work_port_events = 0; 1834 phba->pport->work_port_events = 0;
1747 free_irq(phba->pcidev->irq, phba); 1835 free_irq(phba->pcidev->irq, phba);
1836out_disable_msi:
1748 pci_disable_msi(phba->pcidev); 1837 pci_disable_msi(phba->pcidev);
1749out_destroy_port:
1750 destroy_port(vport); 1838 destroy_port(vport);
1751out_kthread_stop: 1839out_kthread_stop:
1752 kthread_stop(phba->worker_thread); 1840 kthread_stop(phba->worker_thread);
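Note on the error-path reshuffle above: the new out_disable_msi label makes sure pci_disable_msi() runs when request_irq() fails, instead of jumping straight to destroying the port, and each label keeps releasing exactly what was acquired before the failing step. A minimal, self-contained sketch of that unwind pattern (stub names only, not lpfc code):

/* Illustrative sketch of goto-based unwind ordering; the stubs stand in
 * for pci_enable_msi(), request_irq() and lpfc_alloc_sysfs_attr(). */
static int enable_msi_stub(void)   { return 0; }
static int request_irq_stub(void)  { return 0; }
static int sysfs_attr_stub(void)   { return 0; }
static void free_irq_stub(void)    { }
static void disable_msi_stub(void) { }

static int probe_sketch(void)
{
	int error;

	error = enable_msi_stub();
	if (error)
		goto out;
	error = request_irq_stub();
	if (error)
		goto out_disable_msi;
	error = sysfs_attr_stub();
	if (error)
		goto out_free_irq;
	return 0;

out_free_irq:
	free_irq_stub();
out_disable_msi:
	disable_msi_stub();	/* labels mirror acquisition order, newest first */
out:
	return error;
}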
@@ -1786,9 +1874,9 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1786 struct Scsi_Host *shost = pci_get_drvdata(pdev); 1874 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1787 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1875 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1788 struct lpfc_hba *phba = vport->phba; 1876 struct lpfc_hba *phba = vport->phba;
1789 1877 struct lpfc_vport *port_iterator;
1790 vport->load_flag |= FC_UNLOADING; 1878 list_for_each_entry(port_iterator, &phba->port_list, listentry)
1791 lpfc_remove_device(vport); 1879 port_iterator->load_flag |= FC_UNLOADING;
1792 1880
1793 /* 1881 /*
1794 * Bring down the SLI Layer. This step disables all interrupts, 1882 * Bring down the SLI Layer. This step disables all interrupts,
@@ -1798,7 +1886,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1798 lpfc_sli_hba_down(phba); 1886 lpfc_sli_hba_down(phba);
1799 lpfc_sli_brdrestart(phba); 1887 lpfc_sli_brdrestart(phba);
1800 1888
1801 lpfc_stop_timer(phba); 1889 lpfc_stop_phba_timers(phba);
1802 1890
1803 kthread_stop(phba->worker_thread); 1891 kthread_stop(phba->worker_thread);
1804 1892
@@ -1806,7 +1894,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
1806 free_irq(phba->pcidev->irq, phba); 1894 free_irq(phba->pcidev->irq, phba);
1807 pci_disable_msi(phba->pcidev); 1895 pci_disable_msi(phba->pcidev);
1808 1896
1809 vport->work_port_events = 0;
1810 destroy_port(vport); 1897 destroy_port(vport);
1811 1898
1812 pci_set_drvdata(pdev, NULL); 1899 pci_set_drvdata(pdev, NULL);
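The remove path above now walks phba->port_list and flags every vport, not just the physical port, before the SLI layer is shut down. A reduced sketch of that idea follows; the early-return consumer in the second function is an assumption about how such a flag is typically checked, not a quote from this patch:

/* Sketch: mark every vport on the HBA as unloading before teardown. */
static void flag_all_vports_unloading_sketch(struct lpfc_hba *phba)
{
	struct lpfc_vport *port_iterator;

	list_for_each_entry(port_iterator, &phba->port_list, listentry)
		port_iterator->load_flag |= FC_UNLOADING;
}

/* Assumed consumer (not from this patch): deferred work bails out once
 * the flag is set, so nothing new is queued for a dying port. */
static void vport_work_sketch(struct lpfc_vport *vport)
{
	if (vport->load_flag & FC_UNLOADING)
		return;
	/* ... normal processing ... */
}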
@@ -1892,13 +1979,14 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
1892 pci_set_master(pdev); 1979 pci_set_master(pdev);
1893 1980
1894 /* Re-establishing Link */ 1981 /* Re-establishing Link */
1895 spin_lock_irq(&phba->hbalock);
1896 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
1897 spin_unlock_irq(&phba->hbalock);
1898 spin_lock_irq(host->host_lock); 1982 spin_lock_irq(host->host_lock);
1899 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1983 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
1900 spin_unlock_irq(host->host_lock); 1984 spin_unlock_irq(host->host_lock);
1901 1985
1986 spin_lock_irq(&phba->hbalock);
1987 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1988 spin_unlock_irq(&phba->hbalock);
1989
1902 1990
1903 /* Take device offline; this will perform cleanup */ 1991 /* Take device offline; this will perform cleanup */
1904 lpfc_offline(phba); 1992 lpfc_offline(phba);
@@ -2020,11 +2108,15 @@ lpfc_init(void)
2020 2108
2021 lpfc_transport_template = 2109 lpfc_transport_template =
2022 fc_attach_transport(&lpfc_transport_functions); 2110 fc_attach_transport(&lpfc_transport_functions);
2023 if (!lpfc_transport_template) 2111 lpfc_vport_transport_template =
2112 fc_attach_transport(&lpfc_vport_transport_functions);
2113 if (!lpfc_transport_template || !lpfc_vport_transport_template)
2024 return -ENOMEM; 2114 return -ENOMEM;
2025 error = pci_register_driver(&lpfc_driver); 2115 error = pci_register_driver(&lpfc_driver);
2026 if (error) 2116 if (error) {
2027 fc_release_transport(lpfc_transport_template); 2117 fc_release_transport(lpfc_transport_template);
2118 fc_release_transport(lpfc_vport_transport_template);
2119 }
2028 2120
2029 return error; 2121 return error;
2030} 2122}
@@ -2034,6 +2126,7 @@ lpfc_exit(void)
2034{ 2126{
2035 pci_unregister_driver(&lpfc_driver); 2127 pci_unregister_driver(&lpfc_driver);
2036 fc_release_transport(lpfc_transport_template); 2128 fc_release_transport(lpfc_transport_template);
2129 fc_release_transport(lpfc_vport_transport_template);
2037} 2130}
2038 2131
2039module_init(lpfc_init); 2132module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 438cbcd9eb13..8a6ceffeabcf 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -30,6 +30,7 @@
30#define LOG_SLI 0x800 /* SLI events */ 30#define LOG_SLI 0x800 /* SLI events */
31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
32#define LOG_LIBDFC 0x2000 /* Libdfc events */ 32#define LOG_LIBDFC 0x2000 /* Libdfc events */
33#define LOG_VPORT 0x4000 /* NPIV events */
33#define LOG_ALL_MSG 0xffff /* LOG all messages */ 34#define LOG_ALL_MSG 0xffff /* LOG all messages */
34 35
35#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 36#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
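The new LOG_VPORT bit slots into the existing log-mask scheme, so NPIV events can be filtered like any other message class and masks can be OR-ed together. A hedged usage sketch of the macro defined above; the message number and wording are illustrative only:

/* Illustrative only: an NPIV event logged under the new mask bit. */
lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
		"%d (%d):18xx vport created, max_vpi x%x\n",
		phba->brd_no, vport->vpi, phba->max_vpi);

/* Mask bits combine, so one event can be visible under two classes. */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_VPORT,
		"%d (%d):18xx vport discovery started\n",
		phba->brd_no, vport->vpi);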
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 977799c2b2c2..277eb6132e81 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -106,7 +106,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
106 */ 106 */
107 pmb->context1 = (uint8_t *) mp; 107 pmb->context1 = (uint8_t *) mp;
108 mb->mbxOwner = OWN_HOST; 108 mb->mbxOwner = OWN_HOST;
109 return 0; 109 return (0);
110} 110}
111 111
112/**********************************************/ 112/**********************************************/
@@ -209,7 +209,7 @@ lpfc_init_link(struct lpfc_hba * phba,
209 */ 209 */
210 vpd = &phba->vpd; 210 vpd = &phba->vpd;
211 if (vpd->rev.feaLevelHigh >= 0x02){ 211 if (vpd->rev.feaLevelHigh >= 0x02){
212 switch (linkspeed){ 212 switch(linkspeed){
213 case LINK_SPEED_1G: 213 case LINK_SPEED_1G:
214 case LINK_SPEED_2G: 214 case LINK_SPEED_2G:
215 case LINK_SPEED_4G: 215 case LINK_SPEED_4G:
@@ -232,7 +232,6 @@ lpfc_init_link(struct lpfc_hba * phba,
232 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK; 232 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
233 mb->mbxOwner = OWN_HOST; 233 mb->mbxOwner = OWN_HOST;
234 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA; 234 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
235 mb->un.varInitLnk.link_flags |= FLAGS_UNREG_LOGIN_ALL;
236 return; 235 return;
237} 236}
238 237
@@ -241,7 +240,7 @@ lpfc_init_link(struct lpfc_hba * phba,
241/* mailbox command */ 240/* mailbox command */
242/**********************************************/ 241/**********************************************/
243int 242int
244lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 243lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
245{ 244{
246 struct lpfc_dmabuf *mp; 245 struct lpfc_dmabuf *mp;
247 MAILBOX_t *mb; 246 MAILBOX_t *mb;
@@ -265,18 +264,19 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
265 LOG_MBOX, 264 LOG_MBOX,
266 "%d:0301 READ_SPARAM: no buffers\n", 265 "%d:0301 READ_SPARAM: no buffers\n",
267 phba->brd_no); 266 phba->brd_no);
268 return 1; 267 return (1);
269 } 268 }
270 INIT_LIST_HEAD(&mp->list); 269 INIT_LIST_HEAD(&mp->list);
271 mb->mbxCommand = MBX_READ_SPARM64; 270 mb->mbxCommand = MBX_READ_SPARM64;
272 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 271 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
273 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 272 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
274 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 273 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
274 mb->un.varRdSparm.vpi = vpi;
275 275
276 /* save address for completion */ 276 /* save address for completion */
277 pmb->context1 = mp; 277 pmb->context1 = mp;
278 278
279 return 0; 279 return (0);
280} 280}
281 281
282/********************************************/ 282/********************************************/
@@ -284,7 +284,8 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
284/* mailbox command */ 284/* mailbox command */
285/********************************************/ 285/********************************************/
286void 286void
287lpfc_unreg_did(struct lpfc_hba *phba, uint32_t did, LPFC_MBOXQ_t *pmb) 287lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
288 LPFC_MBOXQ_t * pmb)
288{ 289{
289 MAILBOX_t *mb; 290 MAILBOX_t *mb;
290 291
@@ -292,6 +293,7 @@ lpfc_unreg_did(struct lpfc_hba *phba, uint32_t did, LPFC_MBOXQ_t *pmb)
292 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 293 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
293 294
294 mb->un.varUnregDID.did = did; 295 mb->un.varUnregDID.did = did;
296 mb->un.varUnregDID.vpi = vpi;
295 297
296 mb->mbxCommand = MBX_UNREG_D_ID; 298 mb->mbxCommand = MBX_UNREG_D_ID;
297 mb->mbxOwner = OWN_HOST; 299 mb->mbxOwner = OWN_HOST;
@@ -337,8 +339,8 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
337/* mailbox command */ 339/* mailbox command */
338/********************************************/ 340/********************************************/
339int 341int
340lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param, 342lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
341 LPFC_MBOXQ_t *pmb, uint32_t flag) 343 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
342{ 344{
343 MAILBOX_t *mb = &pmb->mb; 345 MAILBOX_t *mb = &pmb->mb;
344 uint8_t *sparam; 346 uint8_t *sparam;
@@ -347,6 +349,7 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
347 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 349 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
348 350
349 mb->un.varRegLogin.rpi = 0; 351 mb->un.varRegLogin.rpi = 0;
352 mb->un.varRegLogin.vpi = vpi;
350 mb->un.varRegLogin.did = did; 353 mb->un.varRegLogin.did = did;
351 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 354 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
352 355
@@ -358,13 +361,11 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
358 kfree(mp); 361 kfree(mp);
359 mb->mbxCommand = MBX_REG_LOGIN64; 362 mb->mbxCommand = MBX_REG_LOGIN64;
360 /* REG_LOGIN: no buffers */ 363 /* REG_LOGIN: no buffers */
361 lpfc_printf_log(phba, 364 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
362 KERN_WARNING, 365 "%d (%d):0302 REG_LOGIN: no buffers, DID x%x, "
363 LOG_MBOX, 366 "flag x%x\n",
364 "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n", 367 phba->brd_no, vpi, did, flag);
365 phba->brd_no, 368 return (1);
366 (uint32_t) did, (uint32_t) flag);
367 return 1;
368 } 369 }
369 INIT_LIST_HEAD(&mp->list); 370 INIT_LIST_HEAD(&mp->list);
370 sparam = mp->virt; 371 sparam = mp->virt;
@@ -380,7 +381,7 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
380 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); 381 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
381 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); 382 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
382 383
383 return 0; 384 return (0);
384} 385}
385 386
386/**********************************************/ 387/**********************************************/
@@ -388,7 +389,8 @@ lpfc_reg_login(struct lpfc_hba *phba, uint32_t did, uint8_t *param,
388/* mailbox command */ 389/* mailbox command */
389/**********************************************/ 390/**********************************************/
390void 391void
391lpfc_unreg_login(struct lpfc_hba *phba, uint32_t rpi, LPFC_MBOXQ_t * pmb) 392lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
393 LPFC_MBOXQ_t * pmb)
392{ 394{
393 MAILBOX_t *mb; 395 MAILBOX_t *mb;
394 396
@@ -397,12 +399,52 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
397 399
398 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 400 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
399 mb->un.varUnregLogin.rsvd1 = 0; 401 mb->un.varUnregLogin.rsvd1 = 0;
402 mb->un.varUnregLogin.vpi = vpi;
400 403
401 mb->mbxCommand = MBX_UNREG_LOGIN; 404 mb->mbxCommand = MBX_UNREG_LOGIN;
402 mb->mbxOwner = OWN_HOST; 405 mb->mbxOwner = OWN_HOST;
403 return; 406 return;
404} 407}
405 408
409/**************************************************/
410/* lpfc_reg_vpi Issue a REG_VPI */
411/* mailbox command */
412/**************************************************/
413void
414lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
415 LPFC_MBOXQ_t *pmb)
416{
417 MAILBOX_t *mb = &pmb->mb;
418
419 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
420
421 mb->un.varRegVpi.vpi = vpi;
422 mb->un.varRegVpi.sid = sid;
423
424 mb->mbxCommand = MBX_REG_VPI;
425 mb->mbxOwner = OWN_HOST;
426 return;
427
428}
429
430/**************************************************/
431/* lpfc_unreg_vpi Issue a UNREG_VNPI */
432/* mailbox command */
433/**************************************************/
434void
435lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
436{
437 MAILBOX_t *mb = &pmb->mb;
438 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
439
440 mb->un.varUnregVpi.vpi = vpi;
441
442 mb->mbxCommand = MBX_UNREG_VPI;
443 mb->mbxOwner = OWN_HOST;
444 return;
445
446}
447
406static void 448static void
407lpfc_config_pcb_setup(struct lpfc_hba * phba) 449lpfc_config_pcb_setup(struct lpfc_hba * phba)
408{ 450{
@@ -420,9 +462,9 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
420 pring = &psli->ring[i]; 462 pring = &psli->ring[i];
421 463
422 pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE: 464 pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
423 SLI2_IOCB_CMD_SIZE; 465 SLI2_IOCB_CMD_SIZE;
424 pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE: 466 pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
425 SLI2_IOCB_RSP_SIZE; 467 SLI2_IOCB_RSP_SIZE;
426 /* A ring MUST have both cmd and rsp entries defined to be 468 /* A ring MUST have both cmd and rsp entries defined to be
427 valid */ 469 valid */
428 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) { 470 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
@@ -437,18 +479,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
437 continue; 479 continue;
438 } 480 }
439 /* Command ring setup for ring */ 481 /* Command ring setup for ring */
440 pring->cmdringaddr = (void *)&phba->slim2p->IOCBs[iocbCnt]; 482 pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
441 pcbp->rdsc[i].cmdEntries = pring->numCiocb; 483 pcbp->rdsc[i].cmdEntries = pring->numCiocb;
442 484
443 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - 485 offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
444 (uint8_t *)phba->slim2p; 486 (uint8_t *) phba->slim2p;
445 pdma_addr = phba->slim2p_mapping + offset; 487 pdma_addr = phba->slim2p_mapping + offset;
446 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); 488 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
447 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); 489 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
448 iocbCnt += pring->numCiocb; 490 iocbCnt += pring->numCiocb;
449 491
450 /* Response ring setup for ring */ 492 /* Response ring setup for ring */
451 pring->rspringaddr = (void *)&phba->slim2p->IOCBs[iocbCnt]; 493 pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
452 494
453 pcbp->rdsc[i].rspEntries = pring->numRiocb; 495 pcbp->rdsc[i].rspEntries = pring->numRiocb;
454 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - 496 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
@@ -519,7 +561,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
519 * Notification */ 561 * Notification */
520 hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks 562 hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
521 * # in words 0-19 */ 563 * # in words 0-19 */
522 hbqmb->profile = hbq_desc->profile; /* Selection profile: 564 hbqmb->profile = hbq_desc->profile; /* Selection profile:
523 * 0 = all, 565 * 0 = all,
524 * 7 = logentry */ 566 * 7 = logentry */
525 hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring 567 hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
@@ -538,9 +580,9 @@ lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
538 mb->mbxCommand = MBX_CONFIG_HBQ; 580 mb->mbxCommand = MBX_CONFIG_HBQ;
539 mb->mbxOwner = OWN_HOST; 581 mb->mbxOwner = OWN_HOST;
540 582
541 /* Copy info for profiles 2,3,5. Other 583 /* Copy info for profiles 2,3,5. Other
542 * profiles this area is reserved 584 * profiles this area is reserved
543 */ 585 */
544 if (hbq_desc->profile == 2) 586 if (hbq_desc->profile == 2)
545 lpfc_build_hbq_profile2(hbqmb, hbq_desc); 587 lpfc_build_hbq_profile2(hbqmb, hbq_desc);
546 else if (hbq_desc->profile == 3) 588 else if (hbq_desc->profile == 3)
@@ -563,6 +605,8 @@ lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
563 return; 605 return;
564} 606}
565 607
608
609
566void 610void
567lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 611lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
568{ 612{
@@ -605,7 +649,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
605} 649}
606 650
607void 651void
608lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 652lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
609{ 653{
610 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 654 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
611 MAILBOX_t *mb = &pmb->mb; 655 MAILBOX_t *mb = &pmb->mb;
@@ -629,11 +673,19 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
629 673
630 /* If HBA supports SLI=3 ask for it */ 674 /* If HBA supports SLI=3 ask for it */
631 675
632 mb->un.varCfgPort.sli_mode = phba->sli_rev; 676 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
633 if (phba->sli_rev == 3) {
634 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 677 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
635 mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */ 678 mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */
636 } 679 if (phba->max_vpi && lpfc_npiv_enable &&
680 phba->vpd.sli3Feat.cmv) {
681 mb->un.varCfgPort.max_vpi = phba->max_vpi;
682 mb->un.varCfgPort.cmv = 1;
683 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
684 } else
685 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
686 } else
687 phba->sli_rev = 2;
688 mb->un.varCfgPort.sli_mode = phba->sli_rev;
637 689
638 /* Now setup pcb */ 690 /* Now setup pcb */
639 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2; 691 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
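The CONFIG_PORT change above is the gate for the rest of the patch: SLI-3 is kept only when the VPD advertises ERBM, and NPIV (cmv plus a non-zero max_vpi) is requested only on top of that; anything else falls back to SLI-2 with no virtual ports. Restated as a bare decision tree (sketch only, stripped of the mailbox bookkeeping):

if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
	/* SLI-3 confirmed: host buffer queues are available */
	if (phba->max_vpi && lpfc_npiv_enable && phba->vpd.sli3Feat.cmv)
		phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;  /* NPIV on */
	else
		phba->max_vpi = 0;	/* SLI-3, but no virtual ports */
} else {
	phba->sli_rev = 2;		/* fall back to SLI-2, no NPIV */
}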
@@ -748,7 +800,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
748 800
749 /* Swap PCB if needed */ 801 /* Swap PCB if needed */
750 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb, 802 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
751 sizeof (PCB_t)); 803 sizeof(PCB_t));
752} 804}
753 805
754void 806void
@@ -783,13 +835,22 @@ lpfc_mbox_get(struct lpfc_hba * phba)
783 struct lpfc_sli *psli = &phba->sli; 835 struct lpfc_sli *psli = &phba->sli;
784 836
785 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list); 837 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
786 if (mbq) { 838 if (mbq)
787 psli->mboxq_cnt--; 839 psli->mboxq_cnt--;
788 }
789 840
790 return mbq; 841 return mbq;
791} 842}
792 843
844void
845lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
846{
847 /* This function expects to be called from interrupt context */
848 spin_lock(&phba->hbalock);
849 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
850 spin_unlock(&phba->hbalock);
851 return;
852}
853
793int 854int
794lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) 855lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
795{ 856{
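lpfc_reg_vpi() and lpfc_unreg_vpi() above only format the mailbox; the caller still has to allocate an LPFC_MBOXQ_t, attach a completion handler and hand it to the SLI layer, the same way the REG_LOGIN paths elsewhere in this patch do. A hedged sketch of how a vport-registration caller might look; the function name, the completion-handler name and the mbox->vport field are assumptions, not part of this patch:

/* Sketch: issue REG_VPI for a newly created vport (names hypothetical). */
static int lpfc_register_new_vport_sketch(struct lpfc_hba *phba,
					  struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Bind this vport's VPI to its assigned S_ID */
	lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
	mbox->vport = vport;			/* assumed field */
	mbox->mbox_cmpl = lpfc_cmpl_reg_vpi;	/* hypothetical handler */

	if (lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
	    == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}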
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 435dc2ec9357..6598e89627df 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -44,6 +44,7 @@ int
44lpfc_mem_alloc(struct lpfc_hba * phba) 44lpfc_mem_alloc(struct lpfc_hba * phba)
45{ 45{
46 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 46 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
47 int longs;
47 int i; 48 int i;
48 49
49 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 50 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
@@ -87,8 +88,15 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
87 if (!phba->lpfc_hbq_pool) 88 if (!phba->lpfc_hbq_pool)
88 goto fail_free_nlp_mem_pool; 89 goto fail_free_nlp_mem_pool;
89 90
91 longs = (phba->max_vpi + BITS_PER_LONG - 1) / BITS_PER_LONG;
92 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
93 if (!phba->vpi_bmask)
94 goto fail_free_hbq_pool;
95
90 return 0; 96 return 0;
91 97
98 fail_free_hbq_pool:
99 lpfc_sli_hbqbuf_free_all(phba);
92 fail_free_nlp_mem_pool: 100 fail_free_nlp_mem_pool:
93 mempool_destroy(phba->nlp_mem_pool); 101 mempool_destroy(phba->nlp_mem_pool);
94 phba->nlp_mem_pool = NULL; 102 phba->nlp_mem_pool = NULL;
@@ -119,9 +127,9 @@ lpfc_mem_free(struct lpfc_hba * phba)
119 struct lpfc_dmabuf *mp; 127 struct lpfc_dmabuf *mp;
120 int i; 128 int i;
121 129
130 kfree(phba->vpi_bmask);
122 lpfc_sli_hbqbuf_free_all(phba); 131 lpfc_sli_hbqbuf_free_all(phba);
123 132
124 spin_lock_irq(&phba->hbalock);
125 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 133 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
126 mp = (struct lpfc_dmabuf *) (mbox->context1); 134 mp = (struct lpfc_dmabuf *) (mbox->context1);
127 if (mp) { 135 if (mp) {
@@ -131,9 +139,17 @@ lpfc_mem_free(struct lpfc_hba * phba)
131 list_del(&mbox->list); 139 list_del(&mbox->list);
132 mempool_free(mbox, phba->mbox_mem_pool); 140 mempool_free(mbox, phba->mbox_mem_pool);
133 } 141 }
142 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
143 mp = (struct lpfc_dmabuf *) (mbox->context1);
144 if (mp) {
145 lpfc_mbuf_free(phba, mp->virt, mp->phys);
146 kfree(mp);
147 }
148 list_del(&mbox->list);
149 mempool_free(mbox, phba->mbox_mem_pool);
150 }
134 151
135 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 152 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
136 spin_unlock_irq(&phba->hbalock);
137 if (psli->mbox_active) { 153 if (psli->mbox_active) {
138 mbox = psli->mbox_active; 154 mbox = psli->mbox_active;
139 mp = (struct lpfc_dmabuf *) (mbox->context1); 155 mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -163,7 +179,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
163 phba->lpfc_scsi_dma_buf_pool = NULL; 179 phba->lpfc_scsi_dma_buf_pool = NULL;
164 phba->lpfc_mbuf_pool = NULL; 180 phba->lpfc_mbuf_pool = NULL;
165 181
166 /* Free the iocb lookup array */ 182 /* Free the iocb lookup array */
167 kfree(psli->iocbq_lookup); 183 kfree(psli->iocbq_lookup);
168 psli->iocbq_lookup = NULL; 184 psli->iocbq_lookup = NULL;
169 185
@@ -179,7 +195,7 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
179 ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); 195 ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
180 196
181 spin_lock_irqsave(&phba->hbalock, iflags); 197 spin_lock_irqsave(&phba->hbalock, iflags);
182 if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) { 198 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
183 pool->current_count--; 199 pool->current_count--;
184 ret = pool->elements[pool->current_count].virt; 200 ret = pool->elements[pool->current_count].virt;
185 *handle = pool->elements[pool->current_count].phys; 201 *handle = pool->elements[pool->current_count].phys;
@@ -214,7 +230,6 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
214 return; 230 return;
215} 231}
216 232
217
218void * 233void *
219lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) 234lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
220{ 235{
@@ -230,3 +245,24 @@ lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
230 return; 245 return;
231} 246}
232 247
248void
249lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
250{
251 struct hbq_dmabuf *hbq_entry;
252
253 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
254 hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
255 if (hbq_entry->tag == -1) {
256 lpfc_hbq_free(phba, hbq_entry->dbuf.virt,
257 hbq_entry->dbuf.phys);
258 kfree(hbq_entry);
259 } else {
260 lpfc_sli_free_hbq(phba, hbq_entry);
261 }
262 } else {
263 lpfc_mbuf_free(phba, mp->virt, mp->phys);
264 kfree(mp);
265 }
266 return;
267}
268
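lpfc_mem_alloc() above sizes the new VPI bitmap with the usual bits-to-longs arithmetic: (max_vpi + BITS_PER_LONG - 1) / BITS_PER_LONG longs, zeroed. How the bitmap is consumed is not part of this file, so the claim/release sketch below is an assumption using the generic kernel bitmap helpers (locking omitted for brevity):

/* Sketch (assumption): claim a free VPI for a new vport; returns the
 * index, or -1 when every virtual port index is already in use. */
static int lpfc_claim_vpi_sketch(struct lpfc_hba *phba)
{
	int vpi;

	vpi = find_first_zero_bit(phba->vpi_bmask, phba->max_vpi);
	if (vpi >= phba->max_vpi)
		return -1;
	set_bit(vpi, phba->vpi_bmask);
	return vpi;
}

/* ...and release it when the vport is destroyed. */
static void lpfc_release_vpi_sketch(struct lpfc_hba *phba, int vpi)
{
	clear_bit(vpi, phba->vpi_bmask);
}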
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b1727481a1e6..50a247602a6b 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,4 +1,4 @@
1/******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
@@ -35,6 +35,7 @@
35#include "lpfc.h" 35#include "lpfc.h"
36#include "lpfc_logmsg.h" 36#include "lpfc_logmsg.h"
37#include "lpfc_crtn.h" 37#include "lpfc_crtn.h"
38#include "lpfc_vport.h"
38 39
39 40
40/* Called to verify a rcv'ed ADISC was intended for us. */ 41/* Called to verify a rcv'ed ADISC was intended for us. */
@@ -74,12 +75,14 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
74 hsp->cls1.rcvDataSizeLsb; 75 hsp->cls1.rcvDataSizeLsb;
75 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | 76 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
76 sp->cls1.rcvDataSizeLsb; 77 sp->cls1.rcvDataSizeLsb;
78 if (!ssp_value)
79 goto bad_service_param;
77 if (ssp_value > hsp_value) { 80 if (ssp_value > hsp_value) {
78 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; 81 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
79 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; 82 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
80 } 83 }
81 } else if (class == CLASS1) { 84 } else if (class == CLASS1) {
82 return 0; 85 goto bad_service_param;
83 } 86 }
84 87
85 if (sp->cls2.classValid) { 88 if (sp->cls2.classValid) {
@@ -87,12 +90,14 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
87 hsp->cls2.rcvDataSizeLsb; 90 hsp->cls2.rcvDataSizeLsb;
88 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | 91 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
89 sp->cls2.rcvDataSizeLsb; 92 sp->cls2.rcvDataSizeLsb;
93 if (!ssp_value)
94 goto bad_service_param;
90 if (ssp_value > hsp_value) { 95 if (ssp_value > hsp_value) {
91 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; 96 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
92 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; 97 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
93 } 98 }
94 } else if (class == CLASS2) { 99 } else if (class == CLASS2) {
95 return 0; 100 goto bad_service_param;
96 } 101 }
97 102
98 if (sp->cls3.classValid) { 103 if (sp->cls3.classValid) {
@@ -100,12 +105,14 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
100 hsp->cls3.rcvDataSizeLsb; 105 hsp->cls3.rcvDataSizeLsb;
101 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | 106 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
102 sp->cls3.rcvDataSizeLsb; 107 sp->cls3.rcvDataSizeLsb;
108 if (!ssp_value)
109 goto bad_service_param;
103 if (ssp_value > hsp_value) { 110 if (ssp_value > hsp_value) {
104 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; 111 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
105 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; 112 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
106 } 113 }
107 } else if (class == CLASS3) { 114 } else if (class == CLASS3) {
108 return 0; 115 goto bad_service_param;
109 } 116 }
110 117
111 /* 118 /*
@@ -124,11 +131,22 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
124 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 131 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
125 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); 132 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
126 return 1; 133 return 1;
134bad_service_param:
135 lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
136 "%d (%d):0207 Device %x "
137 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
138 "invalid service parameters. Ignoring device.\n",
139 vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
140 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
141 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
142 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
143 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
144 return 0;
127} 145}
128 146
129static void * 147static void *
130lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 148lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
131 struct lpfc_iocbq *rspiocb) 149 struct lpfc_iocbq *rspiocb)
132{ 150{
133 struct lpfc_dmabuf *pcmd, *prsp; 151 struct lpfc_dmabuf *pcmd, *prsp;
134 uint32_t *lp; 152 uint32_t *lp;
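The lpfc_check_sparm() hunk above tightens service-parameter validation: for each class the peer marks valid, the advertised receive data field size, carried as an MSB/LSB pair, must now be non-zero, and all the failure paths funnel through the new bad_service_param label that logs which device sent the bogus parameters. A worked example of the size check for one class (values illustrative):

/* A peer advertising rcvDataSizeMsb = 0x08, rcvDataSizeLsb = 0x00 yields
 * (0x08 << 8) | 0x00 = 2048 bytes and passes; an all-zero pair yields 0
 * and the login is rejected via bad_service_param. */
ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | sp->cls3.rcvDataSizeLsb;
if (!ssp_value)
	goto bad_service_param;
if (ssp_value > hsp_value) {	/* peer asks for more than the host takes */
	sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
	sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
}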
@@ -176,10 +194,12 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
176 194
177 /* Abort outstanding I/O on NPort <nlp_DID> */ 195 /* Abort outstanding I/O on NPort <nlp_DID> */
178 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 196 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
179 "%d:0205 Abort outstanding I/O on NPort x%x " 197 "%d (%d):0205 Abort outstanding I/O on NPort x%x "
180 "Data: x%x x%x x%x\n", 198 "Data: x%x x%x x%x\n",
181 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 199 phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
182 ndlp->nlp_state, ndlp->nlp_rpi); 200 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
201
202 lpfc_fabric_abort_nport(ndlp);
183 203
184 /* First check the txq */ 204 /* First check the txq */
185 spin_lock_irq(&phba->hbalock); 205 spin_lock_irq(&phba->hbalock);
@@ -198,15 +218,16 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
198 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 218 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
199 /* Check to see if iocb matches the nport we are looking 219 /* Check to see if iocb matches the nport we are looking
200 for */ 220 for */
201 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) 221 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
202 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 222 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
223 }
203 } 224 }
204 spin_unlock_irq(&phba->hbalock); 225 spin_unlock_irq(&phba->hbalock);
205 226
206 while (!list_empty(&completions)) { 227 while (!list_empty(&completions)) {
207 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 228 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
208 cmd = &iocb->iocb; 229 cmd = &iocb->iocb;
209 list_del(&iocb->list); 230 list_del_init(&iocb->list);
210 231
211 if (!iocb->iocb_cmpl) 232 if (!iocb->iocb_cmpl)
212 lpfc_sli_release_iocbq(phba, iocb); 233 lpfc_sli_release_iocbq(phba, iocb);
@@ -225,7 +246,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
225 246
226static int 247static int
227lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 248lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
228 struct lpfc_iocbq *cmdiocb) 249 struct lpfc_iocbq *cmdiocb)
229{ 250{
230 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 251 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
231 struct lpfc_hba *phba = vport->phba; 252 struct lpfc_hba *phba = vport->phba;
@@ -244,7 +265,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
244 * the FLOGI and resend it first. 265 * the FLOGI and resend it first.
245 */ 266 */
246 if (vport->fc_flag & FC_PT2PT) { 267 if (vport->fc_flag & FC_PT2PT) {
247 lpfc_els_abort_flogi(phba); 268 lpfc_els_abort_flogi(phba);
248 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) { 269 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
249 /* If the other side is supposed to initiate 270 /* If the other side is supposed to initiate
250 * the PLOGI anyway, just ACC it now and 271 * the PLOGI anyway, just ACC it now and
@@ -279,8 +300,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
279 300
280 /* PLOGI chkparm OK */ 301 /* PLOGI chkparm OK */
281 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 302 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
282 "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", 303 "%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
283 phba->brd_no, 304 phba->brd_no, vport->vpi,
284 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 305 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
285 ndlp->nlp_rpi); 306 ndlp->nlp_rpi);
286 307
@@ -314,8 +335,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
314 return 1; 335 return 1;
315 } 336 }
316 337
317 if ((vport->fc_flag & FC_PT2PT) 338 if ((vport->fc_flag & FC_PT2PT) &&
318 && !(vport->fc_flag & FC_PT2PT_PLOGI)) { 339 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
319 /* rcv'ed PLOGI decides what our NPortId will be */ 340 /* rcv'ed PLOGI decides what our NPortId will be */
320 vport->fc_myDID = icmd->un.rcvels.parmRo; 341 vport->fc_myDID = icmd->un.rcvels.parmRo;
321 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 342 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -327,7 +348,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
327 rc = lpfc_sli_issue_mbox 348 rc = lpfc_sli_issue_mbox
328 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 349 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
329 if (rc == MBX_NOT_FINISHED) { 350 if (rc == MBX_NOT_FINISHED) {
330 mempool_free( mbox, phba->mbox_mem_pool); 351 mempool_free(mbox, phba->mbox_mem_pool);
331 goto out; 352 goto out;
332 } 353 }
333 354
@@ -337,8 +358,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
337 if (!mbox) 358 if (!mbox)
338 goto out; 359 goto out;
339 360
340 rc = lpfc_reg_login(phba, icmd->un.rcvels.remoteID, (uint8_t *) sp, 361 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
341 mbox, 0); 362 (uint8_t *) sp, mbox, 0);
342 if (rc) { 363 if (rc) {
343 mempool_free(mbox, phba->mbox_mem_pool); 364 mempool_free(mbox, phba->mbox_mem_pool);
344 goto out; 365 goto out;
@@ -415,7 +436,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
415 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); 436 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
416 } else { 437 } else {
417 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, 438 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
418 NULL, 0); 439 NULL, 0);
419 } 440 }
420 return 1; 441 return 1;
421 } 442 }
@@ -457,7 +478,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
457 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 478 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
458 479
459 if (!(ndlp->nlp_type & NLP_FABRIC) || 480 if (!(ndlp->nlp_type & NLP_FABRIC) ||
460 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 481 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
461 /* Only try to re-login if this is NOT a Fabric Node */ 482 /* Only try to re-login if this is NOT a Fabric Node */
462 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 483 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
463 spin_lock_irq(shost->host_lock); 484 spin_lock_irq(shost->host_lock);
@@ -499,8 +520,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
499 520
500 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 521 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
501 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 522 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
502 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && 523 if (npr->prliType == PRLI_FCP_TYPE) {
503 (npr->prliType == PRLI_FCP_TYPE)) {
504 if (npr->initiatorFunc) 524 if (npr->initiatorFunc)
505 ndlp->nlp_type |= NLP_FCP_INITIATOR; 525 ndlp->nlp_type |= NLP_FCP_INITIATOR;
506 if (npr->targetFunc) 526 if (npr->targetFunc)
@@ -526,15 +546,16 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
526 struct lpfc_hba *phba = vport->phba; 546 struct lpfc_hba *phba = vport->phba;
527 547
528 /* Check config parameter use-adisc or FCP-2 */ 548 /* Check config parameter use-adisc or FCP-2 */
529 if (phba->cfg_use_adisc == 0 && 549 if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
530 (vport->fc_flag & FC_RSCN_MODE) == 0 && 550 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
531 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) == 0) 551 spin_lock_irq(shost->host_lock);
532 return 0; 552 ndlp->nlp_flag |= NLP_NPR_ADISC;
533 553 spin_unlock_irq(shost->host_lock);
534 spin_lock_irq(shost->host_lock); 554 return 1;
535 ndlp->nlp_flag |= NLP_NPR_ADISC; 555 }
536 spin_unlock_irq(shost->host_lock); 556 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
537 return 1; 557 lpfc_unreg_rpi(vport, ndlp);
558 return 0;
538} 559}
539 560
540static uint32_t 561static uint32_t
@@ -542,9 +563,9 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
542 void *arg, uint32_t evt) 563 void *arg, uint32_t evt)
543{ 564{
544 lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY, 565 lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
545 "%d:0253 Illegal State Transition: node x%x event x%x, " 566 "%d (%d):0253 Illegal State Transition: node x%x "
546 "state x%x Data: x%x x%x\n", 567 "event x%x, state x%x Data: x%x x%x\n",
547 vport->phba->brd_no, 568 vport->phba->brd_no, vport->vpi,
548 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 569 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
549 ndlp->nlp_flag); 570 ndlp->nlp_flag);
550 return ndlp->nlp_state; 571 return ndlp->nlp_state;
@@ -629,7 +650,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
629 */ 650 */
630 phba->fc_stat.elsLogiCol++; 651 phba->fc_stat.elsLogiCol++;
631 port_cmp = memcmp(&vport->fc_portname, &sp->portName, 652 port_cmp = memcmp(&vport->fc_portname, &sp->portName,
632 sizeof (struct lpfc_name)); 653 sizeof(struct lpfc_name));
633 654
634 if (port_cmp >= 0) { 655 if (port_cmp >= 0) {
635 /* Reject this request because the remote node will accept 656 /* Reject this request because the remote node will accept
@@ -645,12 +666,26 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
645} 666}
646 667
647static uint32_t 668static uint32_t
669lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
670 void *arg, uint32_t evt)
671{
672 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
673 struct ls_rjt stat;
674
675 memset(&stat, 0, sizeof (struct ls_rjt));
676 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
677 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
678 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp);
679 return ndlp->nlp_state;
680}
681
682static uint32_t
648lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 683lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
649 void *arg, uint32_t evt) 684 void *arg, uint32_t evt)
650{ 685{
651 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; 686 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
652 687
653 /* software abort outstanding PLOGI */ 688 /* software abort outstanding PLOGI */
654 lpfc_els_abort(vport->phba, ndlp); 689 lpfc_els_abort(vport->phba, ndlp);
655 690
656 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); 691 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
@@ -724,9 +759,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
724 759
725 /* PLOGI chkparm OK */ 760 /* PLOGI chkparm OK */
726 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 761 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
727 "%d:0121 PLOGI chkparm OK " 762 "%d (%d):0121 PLOGI chkparm OK "
728 "Data: x%x x%x x%x x%x\n", 763 "Data: x%x x%x x%x x%x\n",
729 phba->brd_no, 764 phba->brd_no, vport->vpi,
730 ndlp->nlp_DID, ndlp->nlp_state, 765 ndlp->nlp_DID, ndlp->nlp_state,
731 ndlp->nlp_flag, ndlp->nlp_rpi); 766 ndlp->nlp_flag, ndlp->nlp_rpi);
732 767
@@ -748,13 +783,20 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
748 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; 783 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
749 784
750 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 785 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
751 if (!mbox) 786 if (!mbox) {
787 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
788 "%d (%d):0133 PLOGI: no memory for reg_login "
789 "Data: x%x x%x x%x x%x\n",
790 phba->brd_no, vport->vpi,
791 ndlp->nlp_DID, ndlp->nlp_state,
792 ndlp->nlp_flag, ndlp->nlp_rpi);
752 goto out; 793 goto out;
794 }
753 795
754 lpfc_unreg_rpi(vport, ndlp); 796 lpfc_unreg_rpi(vport, ndlp);
755 797
756 if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp, 798 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
757 mbox, 0) == 0) { 799 (uint8_t *) sp, mbox, 0) == 0) {
758 switch (ndlp->nlp_DID) { 800 switch (ndlp->nlp_DID) {
759 case NameServer_DID: 801 case NameServer_DID:
760 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; 802 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -775,16 +817,37 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
775 return ndlp->nlp_state; 817 return ndlp->nlp_state;
776 } 818 }
777 lpfc_nlp_put(ndlp); 819 lpfc_nlp_put(ndlp);
778 mp = (struct lpfc_dmabuf *)mbox->context1; 820 mp = (struct lpfc_dmabuf *) mbox->context1;
779 lpfc_mbuf_free(phba, mp->virt, mp->phys); 821 lpfc_mbuf_free(phba, mp->virt, mp->phys);
780 kfree(mp); 822 kfree(mp);
781 mempool_free(mbox, phba->mbox_mem_pool); 823 mempool_free(mbox, phba->mbox_mem_pool);
824
825 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
826 "%d (%d):0134 PLOGI: cannot issue reg_login "
827 "Data: x%x x%x x%x x%x\n",
828 phba->brd_no, vport->vpi,
829 ndlp->nlp_DID, ndlp->nlp_state,
830 ndlp->nlp_flag, ndlp->nlp_rpi);
782 } else { 831 } else {
783 mempool_free(mbox, phba->mbox_mem_pool); 832 mempool_free(mbox, phba->mbox_mem_pool);
833
834 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
835 "%d (%d):0135 PLOGI: cannot format reg_login "
836 "Data: x%x x%x x%x x%x\n",
837 phba->brd_no, vport->vpi,
838 ndlp->nlp_DID, ndlp->nlp_state,
839 ndlp->nlp_flag, ndlp->nlp_rpi);
784 } 840 }
785 841
786 842
787 out: 843out:
844 if (ndlp->nlp_DID == NameServer_DID) {
845 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
846 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
847 "%d (%d):0261 Cannot Register NameServer login\n",
848 phba->brd_no, vport->vpi);
849 }
850
788 /* Free this node since the driver cannot login or has the wrong 851 /* Free this node since the driver cannot login or has the wrong
789 sparm */ 852 sparm */
790 lpfc_drop_node(vport, ndlp); 853 lpfc_drop_node(vport, ndlp);
@@ -820,12 +883,18 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
820 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 883 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
821 struct lpfc_hba *phba = vport->phba; 884 struct lpfc_hba *phba = vport->phba;
822 885
886 /* Don't do anything that will mess up processing of the
887 * previous RSCN.
888 */
889 if (vport->fc_flag & FC_RSCN_DEFERRED)
890 return ndlp->nlp_state;
891
823 /* software abort outstanding PLOGI */ 892 /* software abort outstanding PLOGI */
824 lpfc_els_abort(phba, ndlp); 893 lpfc_els_abort(phba, ndlp);
825 894
826 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 895 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
827 spin_lock_irq(shost->host_lock);
828 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 896 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
897 spin_lock_irq(shost->host_lock);
829 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 898 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
830 spin_unlock_irq(shost->host_lock); 899 spin_unlock_irq(shost->host_lock);
831 900
@@ -924,7 +993,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
924 irsp = &rspiocb->iocb; 993 irsp = &rspiocb->iocb;
925 994
926 if ((irsp->ulpStatus) || 995 if ((irsp->ulpStatus) ||
927 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { 996 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
928 /* 1 sec timeout */ 997 /* 1 sec timeout */
929 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 998 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
930 spin_lock_irq(shost->host_lock); 999 spin_lock_irq(shost->host_lock);
@@ -980,6 +1049,12 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
980 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1049 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
981 struct lpfc_hba *phba = vport->phba; 1050 struct lpfc_hba *phba = vport->phba;
982 1051
1052 /* Don't do anything that will mess up processing of the
1053 * previous RSCN.
1054 */
1055 if (vport->fc_flag & FC_RSCN_DEFERRED)
1056 return ndlp->nlp_state;
1057
983 /* software abort outstanding ADISC */ 1058 /* software abort outstanding ADISC */
984 lpfc_els_abort(phba, ndlp); 1059 lpfc_els_abort(phba, ndlp);
985 1060
@@ -987,9 +1062,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
987 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1062 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
988 spin_lock_irq(shost->host_lock); 1063 spin_lock_irq(shost->host_lock);
989 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1064 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
990 ndlp->nlp_flag |= NLP_NPR_ADISC;
991 spin_unlock_irq(shost->host_lock); 1065 spin_unlock_irq(shost->host_lock);
992 1066 lpfc_disc_set_adisc(vport, ndlp);
993 return ndlp->nlp_state; 1067 return ndlp->nlp_state;
994} 1068}
995 1069
@@ -1035,6 +1109,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1035 if ((mb = phba->sli.mbox_active)) { 1109 if ((mb = phba->sli.mbox_active)) {
1036 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1110 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1037 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1111 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1112 lpfc_nlp_put(ndlp);
1038 mb->context2 = NULL; 1113 mb->context2 = NULL;
1039 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1114 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1040 } 1115 }
@@ -1049,6 +1124,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1049 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1124 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1050 kfree(mp); 1125 kfree(mp);
1051 } 1126 }
1127 lpfc_nlp_put(ndlp);
1052 list_del(&mb->list); 1128 list_del(&mb->list);
1053 mempool_free(mb, phba->mbox_mem_pool); 1129 mempool_free(mb, phba->mbox_mem_pool);
1054 } 1130 }
@@ -1099,8 +1175,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1099 if (mb->mbxStatus) { 1175 if (mb->mbxStatus) {
1100 /* RegLogin failed */ 1176 /* RegLogin failed */
1101 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 1177 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
1102 "%d:0246 RegLogin failed Data: x%x x%x x%x\n", 1178 "%d (%d):0246 RegLogin failed Data: x%x x%x "
1103 phba->brd_no, 1179 "x%x\n",
1180 phba->brd_no, vport->vpi,
1104 did, mb->mbxStatus, vport->port_state); 1181 did, mb->mbxStatus, vport->port_state);
1105 1182
1106 /* 1183 /*
@@ -1167,11 +1244,18 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1167{ 1244{
1168 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1245 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1169 1246
1247 /* Don't do anything that will mess up processing of the
1248 * previous RSCN.
1249 */
1250 if (vport->fc_flag & FC_RSCN_DEFERRED)
1251 return ndlp->nlp_state;
1252
1170 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1253 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1171 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 1254 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1172 spin_lock_irq(shost->host_lock); 1255 spin_lock_irq(shost->host_lock);
1173 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1256 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1174 spin_unlock_irq(shost->host_lock); 1257 spin_unlock_irq(shost->host_lock);
1258 lpfc_disc_set_adisc(vport, ndlp);
1175 return ndlp->nlp_state; 1259 return ndlp->nlp_state;
1176} 1260}
1177 1261
@@ -1239,6 +1323,7 @@ static uint32_t
1239lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1323lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1240 void *arg, uint32_t evt) 1324 void *arg, uint32_t evt)
1241{ 1325{
1326 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1242 struct lpfc_iocbq *cmdiocb, *rspiocb; 1327 struct lpfc_iocbq *cmdiocb, *rspiocb;
1243 struct lpfc_hba *phba = vport->phba; 1328 struct lpfc_hba *phba = vport->phba;
1244 IOCB_t *irsp; 1329 IOCB_t *irsp;
@@ -1267,29 +1352,45 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1267 if (npr->Retry) 1352 if (npr->Retry)
1268 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 1353 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1269 } 1354 }
1355 if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1356 (vport->port_type == LPFC_NPIV_PORT) &&
1357 phba->cfg_vport_restrict_login) {
1358 spin_lock_irq(shost->host_lock);
1359 ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1360 spin_unlock_irq(shost->host_lock);
1361 lpfc_issue_els_logo(vport, ndlp, 0);
1362
1363 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1364 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1365 return ndlp->nlp_state;
1366 }
1270 1367
1271 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1368 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1272 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); 1369 if (ndlp->nlp_type & NLP_FCP_TARGET)
1370 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1371 else
1372 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1273 return ndlp->nlp_state; 1373 return ndlp->nlp_state;
1274} 1374}
1275 1375
1276/*! lpfc_device_rm_prli_issue 1376/*! lpfc_device_rm_prli_issue
1277 * 1377 *
1278 * \pre 1378 * \pre
1279 * \post 1379 * \post
1280 * \param phba 1380 * \param phba
1281 * \param ndlp 1381 * \param ndlp
1282 * \param arg 1382 * \param arg
1283 * \param evt 1383 * \param evt
1284 * \return uint32_t 1384 * \return uint32_t
1285 * 1385 *
1286 * \b Description: 1386 * \b Description:
1287 * This routine is envoked when we a request to remove a nport we are in the 1387 * This routine is envoked when we a request to remove a nport we are in the
1288 * This routine is invoked when we receive a request to remove an nport we are in the 1388 * This routine is invoked when we receive a request to remove an nport we are in the
1289 * login, send a logout. We will change node state to UNUSED_NODE, put it 1389 * login, send a logout. We will change node state to UNUSED_NODE, put it
1290 * in plogi state so it can be freed when LOGO completes. 1390 * on plogi list so it can be freed when LOGO completes.
1291 * 1391 *
1292 */ 1392 */
1393
1293static uint32_t 1394static uint32_t
1294lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1395lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1295 void *arg, uint32_t evt) 1396 void *arg, uint32_t evt)
@@ -1312,21 +1413,21 @@ lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1312 1413
1313 1414
1314/*! lpfc_device_recov_prli_issue 1415/*! lpfc_device_recov_prli_issue
1315 * 1416 *
1316 * \pre 1417 * \pre
1317 * \post 1418 * \post
1318 * \param phba 1419 * \param phba
1319 * \param ndlp 1420 * \param ndlp
1320 * \param arg 1421 * \param arg
1321 * \param evt 1422 * \param evt
1322 * \return uint32_t 1423 * \return uint32_t
1323 * 1424 *
1324 * \b Description: 1425 * \b Description:
1325 * The routine is invoked when the state of a device is unknown, like 1426 * The routine is invoked when the state of a device is unknown, like
1326 * during a link down. We should remove the nodelist entry from the 1427 * during a link down. We should remove the nodelist entry from the
1327 * unmapped list, issue a UNREG_LOGIN, do a software abort of the 1428 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
1328 * outstanding PRLI command, then free the node entry. 1429 * outstanding PRLI command, then free the node entry.
1329 */ 1430 */
1330static uint32_t 1431static uint32_t
1331lpfc_device_recov_prli_issue(struct lpfc_vport *vport, 1432lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1332 struct lpfc_nodelist *ndlp, 1433 struct lpfc_nodelist *ndlp,
@@ -1336,6 +1437,12 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1336 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1437 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1337 struct lpfc_hba *phba = vport->phba; 1438 struct lpfc_hba *phba = vport->phba;
1338 1439
1440 /* Don't do anything that will mess up processing of the
1441 * previous RSCN.
1442 */
1443 if (vport->fc_flag & FC_RSCN_DEFERRED)
1444 return ndlp->nlp_state;
1445
1339 /* software abort outstanding PRLI */ 1446 /* software abort outstanding PRLI */
1340 lpfc_els_abort(phba, ndlp); 1447 lpfc_els_abort(phba, ndlp);
1341 1448
@@ -1344,6 +1451,7 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1344 spin_lock_irq(shost->host_lock); 1451 spin_lock_irq(shost->host_lock);
1345 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1452 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1346 spin_unlock_irq(shost->host_lock); 1453 spin_unlock_irq(shost->host_lock);
1454 lpfc_disc_set_adisc(vport, ndlp);
1347 return ndlp->nlp_state; 1455 return ndlp->nlp_state;
1348} 1456}
1349 1457
@@ -1466,7 +1574,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1466 1574
1467 /* flush the target */ 1575 /* flush the target */
1468 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 1576 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1469 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 1577 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1470 1578
1471 /* Treat like rcv logo */ 1579 /* Treat like rcv logo */
1472 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO); 1580 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
@@ -1573,8 +1681,9 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1573 * here will affect the counting of discovery threads. 1681 * here will affect the counting of discovery threads.
1574 */ 1682 */
1575 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && 1683 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1576 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){ 1684 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1577 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1685 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1686 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1578 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1687 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1579 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 1688 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1580 lpfc_issue_els_adisc(vport, ndlp, 0); 1689 lpfc_issue_els_adisc(vport, ndlp, 0);
@@ -1719,6 +1828,12 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1719{ 1828{
1720 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1829 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1721 1830
1831 /* Don't do anything that will mess up processing of the
1832 * previous RSCN.
1833 */
1834 if (vport->fc_flag & FC_RSCN_DEFERRED)
1835 return ndlp->nlp_state;
1836
1722 spin_lock_irq(shost->host_lock); 1837 spin_lock_irq(shost->host_lock);
1723 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1838 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1724 spin_unlock_irq(shost->host_lock); 1839 spin_unlock_irq(shost->host_lock);
@@ -1803,7 +1918,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1803 lpfc_disc_illegal, /* DEVICE_RECOVERY */ 1918 lpfc_disc_illegal, /* DEVICE_RECOVERY */
1804 1919
1805 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ 1920 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
1806 lpfc_rcv_els_plogi_issue, /* RCV_PRLI */ 1921 lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
1807 lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ 1922 lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
1808 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ 1923 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
1809 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ 1924 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
@@ -1915,9 +2030,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1915 2030
1916 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ 2031 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
1917 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2032 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1918 "%d:0211 DSM in event x%x on NPort x%x in state %d " 2033 "%d (%d):0211 DSM in event x%x on NPort x%x in "
1919 "Data: x%x\n", 2034 "state %d Data: x%x\n",
1920 phba->brd_no, 2035 phba->brd_no, vport->vpi,
1921 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag); 2036 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
1922 2037
1923 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; 2038 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
@@ -1925,9 +2040,10 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1925 2040
1926 /* DSM out state <rc> on NPort <nlp_DID> */ 2041 /* DSM out state <rc> on NPort <nlp_DID> */
1927 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2042 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1928 "%d:0212 DSM out state %d on NPort x%x Data: x%x\n", 2043 "%d (%d):0212 DSM out state %d on NPort x%x "
1929 phba->brd_no, 2044 "Data: x%x\n",
1930 rc, ndlp->nlp_DID, ndlp->nlp_flag); 2045 phba->brd_no, vport->vpi,
2046 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1931 2047
1932 lpfc_nlp_put(ndlp); 2048 lpfc_nlp_put(ndlp);
1933 2049
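For reference, lpfc_disc_state_machine() dispatches through a flat handler array indexed by (cur_state * NLP_EVT_MAX_EVENT) + evt, so the corrected RCV_PRLI entry in the PLOGI_ISSUE row above decides which routine runs for that state/event pair. A stand-alone sketch of that flattened-table dispatch, with invented names and table sizes rather than the driver's real constants:

	#include <stdio.h>

	#define MAX_EVENT 3				/* stand-in for NLP_EVT_MAX_EVENT */

	typedef unsigned int (*dsm_func_t)(int did, int evt);

	static unsigned int disc_illegal(int did, int evt) { printf("illegal did=%d evt=%d\n", did, evt); return 0; }
	static unsigned int rcv_plogi(int did, int evt)    { printf("plogi   did=%d evt=%d\n", did, evt); return 1; }
	static unsigned int rcv_prli(int did, int evt)     { printf("prli    did=%d evt=%d\n", did, evt); return 2; }

	/* One row of MAX_EVENT handlers per state, flattened into a 1-D table. */
	static dsm_func_t dsm_table[2 * MAX_EVENT] = {
		/* state 0 */ disc_illegal, disc_illegal, disc_illegal,
		/* state 1 */ rcv_plogi,    rcv_prli,     disc_illegal,
	};

	int main(void)
	{
		int cur_state = 1, evt = 1;	/* selects the state-1 "RCV_PRLI" entry */
		dsm_func_t func = dsm_table[(cur_state * MAX_EVENT) + evt];

		return (int)func(0x0100, evt);
	}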
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 90c88733a4f5..af8f8968bfba 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -37,11 +37,160 @@
37#include "lpfc.h" 37#include "lpfc.h"
38#include "lpfc_logmsg.h" 38#include "lpfc_logmsg.h"
39#include "lpfc_crtn.h" 39#include "lpfc_crtn.h"
40#include "lpfc_vport.h"
40 41
41#define LPFC_RESET_WAIT 2 42#define LPFC_RESET_WAIT 2
42#define LPFC_ABORT_WAIT 2 43#define LPFC_ABORT_WAIT 2
43 44
44/* 45/*
46 * This function is called with no lock held when there is a resource
47 * error in the driver or in the firmware.
48 */
49void
50lpfc_adjust_queue_depth(struct lpfc_hba *phba)
51{
52 unsigned long flags;
53
54 spin_lock_irqsave(&phba->hbalock, flags);
55 atomic_inc(&phba->num_rsrc_err);
56 phba->last_rsrc_error_time = jiffies;
57
58 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
59 spin_unlock_irqrestore(&phba->hbalock, flags);
60 return;
61 }
62
63 phba->last_ramp_down_time = jiffies;
64
65 spin_unlock_irqrestore(&phba->hbalock, flags);
66
67 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
68 if ((phba->pport->work_port_events &
69 WORKER_RAMP_DOWN_QUEUE) == 0) {
70 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
71 }
72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
73
74 spin_lock_irqsave(&phba->hbalock, flags);
75 if (phba->work_wait)
76 wake_up(phba->work_wait);
77 spin_unlock_irqrestore(&phba->hbalock, flags);
78
79 return;
80}
81
82/*
83 * This function is called with no lock held when there is a successful
84 * SCSI command completion.
85 */
86static inline void
87lpfc_rampup_queue_depth(struct lpfc_hba *phba,
88 struct scsi_device *sdev)
89{
90 unsigned long flags;
91 atomic_inc(&phba->num_cmd_success);
92
93 if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
94 return;
95
96 spin_lock_irqsave(&phba->hbalock, flags);
97 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
98 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
99 spin_unlock_irqrestore(&phba->hbalock, flags);
100 return;
101 }
102
103 phba->last_ramp_up_time = jiffies;
104 spin_unlock_irqrestore(&phba->hbalock, flags);
105
106 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
107 if ((phba->pport->work_port_events &
108 WORKER_RAMP_UP_QUEUE) == 0) {
109 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
110 }
111 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
112
113 spin_lock_irqsave(&phba->hbalock, flags);
114 if (phba->work_wait)
115 wake_up(phba->work_wait);
116 spin_unlock_irqrestore(&phba->hbalock, flags);
117}
118
119void
120lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
121{
122 struct lpfc_vport *vport;
123 struct Scsi_Host *host;
124 struct scsi_device *sdev;
125 unsigned long new_queue_depth;
126 unsigned long num_rsrc_err, num_cmd_success;
127
128 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
129 num_cmd_success = atomic_read(&phba->num_cmd_success);
130
131 spin_lock_irq(&phba->hbalock);
132 list_for_each_entry(vport, &phba->port_list, listentry) {
133 host = lpfc_shost_from_vport(vport);
134 if (!scsi_host_get(host))
135 continue;
136
137 spin_unlock_irq(&phba->hbalock);
138
139 shost_for_each_device(sdev, host) {
140 new_queue_depth = sdev->queue_depth * num_rsrc_err /
141 (num_rsrc_err + num_cmd_success);
142 if (!new_queue_depth)
143 new_queue_depth = sdev->queue_depth - 1;
144 else
145 new_queue_depth =
146 sdev->queue_depth - new_queue_depth;
147
148 if (sdev->ordered_tags)
149 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
150 new_queue_depth);
151 else
152 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
153 new_queue_depth);
154 }
155 spin_lock_irq(&phba->hbalock);
156 scsi_host_put(host);
157 }
158 spin_unlock_irq(&phba->hbalock);
159 atomic_set(&phba->num_rsrc_err, 0);
160 atomic_set(&phba->num_cmd_success, 0);
161}
162
163void
164lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
165{
166 struct lpfc_vport *vport;
167 struct Scsi_Host *host;
168 struct scsi_device *sdev;
169
170 spin_lock_irq(&phba->hbalock);
171 list_for_each_entry(vport, &phba->port_list, listentry) {
172 host = lpfc_shost_from_vport(vport);
173 if (!scsi_host_get(host))
174 continue;
175
176 spin_unlock_irq(&phba->hbalock);
177 shost_for_each_device(sdev, host) {
178 if (sdev->ordered_tags)
179 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
180 sdev->queue_depth+1);
181 else
182 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
183 sdev->queue_depth+1);
184 }
185 spin_lock_irq(&phba->hbalock);
186 scsi_host_put(host);
187 }
188 spin_unlock_irq(&phba->hbalock);
189 atomic_set(&phba->num_rsrc_err, 0);
190 atomic_set(&phba->num_cmd_success, 0);
191}
192
193/*
45 * This routine allocates a scsi buffer, which contains all the necessary 194 * This routine allocates a scsi buffer, which contains all the necessary
46 * information needed to initiate a SCSI I/O. The non-DMAable buffer region 195 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
47 * contains information to build the IOCB. The DMAable region contains 196 * contains information to build the IOCB. The DMAable region contains
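To make the ramp-down handler above concrete: it shrinks each LUN queue depth in proportion to the share of recent commands that hit a resource error, new_queue_depth = queue_depth * num_rsrc_err / (num_rsrc_err + num_cmd_success), subtracts that from the current depth, and always drops by at least one. A stand-alone sketch of the arithmetic in plain user-space C with made-up counters (the driver reads them from atomics on the HBA and applies the result through scsi_adjust_queue_depth()):

	#include <stdio.h>

	/* Proportional ramp-down: cut the depth by the resource-error share of
	 * recent I/O, but drop by at least one. The handler is only queued after
	 * a resource error, so the divisor is never zero. */
	static unsigned long ramp_down(unsigned long depth,
				       unsigned long num_rsrc_err,
				       unsigned long num_cmd_success)
	{
		unsigned long cut = depth * num_rsrc_err /
				    (num_rsrc_err + num_cmd_success);

		return cut ? depth - cut : depth - 1;
	}

	int main(void)
	{
		printf("%lu\n", ramp_down(32, 5, 95));	/* mild errors  -> 31 */
		printf("%lu\n", ramp_down(32, 50, 50));	/* heavy errors -> 16 */
		return 0;
	}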
@@ -154,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
154} 303}
155 304
156static void 305static void
157lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 306lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
158{ 307{
159 unsigned long iflag = 0; 308 unsigned long iflag = 0;
160 309
@@ -165,13 +314,16 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
165} 314}
166 315
167static int 316static int
168lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) 317lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
169{ 318{
170 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 319 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
171 struct scatterlist *sgel = NULL; 320 struct scatterlist *sgel = NULL;
172 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 321 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
173 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 322 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
174 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 323 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
324 uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
325 ? lpfc_cmd->cur_iocbq.vport->vpi
326 : 0);
175 dma_addr_t physaddr; 327 dma_addr_t physaddr;
176 uint32_t i, num_bde = 0; 328 uint32_t i, num_bde = 0;
177 int datadir = scsi_cmnd->sc_data_direction; 329 int datadir = scsi_cmnd->sc_data_direction;
@@ -235,9 +387,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
235 dma_error = dma_mapping_error(physaddr); 387 dma_error = dma_mapping_error(physaddr);
236 if (dma_error) { 388 if (dma_error) {
237 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 389 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
238 "%d:0718 Unable to dma_map_single " 390 "%d (%d):0718 Unable to dma_map_single "
239 "request_buffer: x%x\n", 391 "request_buffer: x%x\n",
240 phba->brd_no, dma_error); 392 phba->brd_no, vpi, dma_error);
241 return 1; 393 return 1;
242 } 394 }
243 395
@@ -299,6 +451,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
299 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 451 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
300 struct lpfc_hba *phba = vport->phba; 452 struct lpfc_hba *phba = vport->phba;
301 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 453 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
454 uint32_t vpi = vport->vpi;
302 uint32_t resp_info = fcprsp->rspStatus2; 455 uint32_t resp_info = fcprsp->rspStatus2;
303 uint32_t scsi_status = fcprsp->rspStatus3; 456 uint32_t scsi_status = fcprsp->rspStatus3;
304 uint32_t *lp; 457 uint32_t *lp;
@@ -331,9 +484,9 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
331 logit = LOG_FCP; 484 logit = LOG_FCP;
332 485
333 lpfc_printf_log(phba, KERN_WARNING, logit, 486 lpfc_printf_log(phba, KERN_WARNING, logit,
334 "%d:0730 FCP command x%x failed: x%x SNS x%x x%x " 487 "%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
335 "Data: x%x x%x x%x x%x x%x\n", 488 "Data: x%x x%x x%x x%x x%x\n",
336 phba->brd_no, cmnd->cmnd[0], scsi_status, 489 phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
337 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 490 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
338 be32_to_cpu(fcprsp->rspResId), 491 be32_to_cpu(fcprsp->rspResId),
339 be32_to_cpu(fcprsp->rspSnsLen), 492 be32_to_cpu(fcprsp->rspSnsLen),
@@ -354,10 +507,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
354 cmnd->resid = be32_to_cpu(fcprsp->rspResId); 507 cmnd->resid = be32_to_cpu(fcprsp->rspResId);
355 508
356 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 509 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
357 "%d:0716 FCP Read Underrun, expected %d, " 510 "%d (%d):0716 FCP Read Underrun, expected %d, "
358 "residual %d Data: x%x x%x x%x\n", phba->brd_no, 511 "residual %d Data: x%x x%x x%x\n",
359 be32_to_cpu(fcpcmd->fcpDl), cmnd->resid, 512 phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
360 fcpi_parm, cmnd->cmnd[0], cmnd->underflow); 513 cmnd->resid, fcpi_parm, cmnd->cmnd[0],
514 cmnd->underflow);
361 515
362 /* 516 /*
363 * If there is an under run check if under run reported by 517 * If there is an under run check if under run reported by
@@ -368,12 +522,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
368 fcpi_parm && 522 fcpi_parm &&
369 (cmnd->resid != fcpi_parm)) { 523 (cmnd->resid != fcpi_parm)) {
370 lpfc_printf_log(phba, KERN_WARNING, 524 lpfc_printf_log(phba, KERN_WARNING,
371 LOG_FCP | LOG_FCP_ERROR, 525 LOG_FCP | LOG_FCP_ERROR,
372 "%d:0735 FCP Read Check Error and Underrun " 526 "%d (%d):0735 FCP Read Check Error "
373 "Data: x%x x%x x%x x%x\n", phba->brd_no, 527 "and Underrun Data: x%x x%x x%x x%x\n",
374 be32_to_cpu(fcpcmd->fcpDl), 528 phba->brd_no, vpi,
375 cmnd->resid, 529 be32_to_cpu(fcpcmd->fcpDl),
376 fcpi_parm, cmnd->cmnd[0]); 530 cmnd->resid, fcpi_parm, cmnd->cmnd[0]);
377 cmnd->resid = cmnd->request_bufflen; 531 cmnd->resid = cmnd->request_bufflen;
378 host_status = DID_ERROR; 532 host_status = DID_ERROR;
379 } 533 }
@@ -387,19 +541,20 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
387 (scsi_status == SAM_STAT_GOOD) && 541 (scsi_status == SAM_STAT_GOOD) &&
388 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { 542 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
389 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 543 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
390 "%d:0717 FCP command x%x residual " 544 "%d (%d):0717 FCP command x%x residual "
391 "underrun converted to error " 545 "underrun converted to error "
392 "Data: x%x x%x x%x\n", phba->brd_no, 546 "Data: x%x x%x x%x\n",
393 cmnd->cmnd[0], cmnd->request_bufflen, 547 phba->brd_no, vpi, cmnd->cmnd[0],
394 cmnd->resid, cmnd->underflow); 548 cmnd->request_bufflen, cmnd->resid,
549 cmnd->underflow);
395 550
396 host_status = DID_ERROR; 551 host_status = DID_ERROR;
397 } 552 }
398 } else if (resp_info & RESID_OVER) { 553 } else if (resp_info & RESID_OVER) {
399 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 554 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
400 "%d:0720 FCP command x%x residual " 555 "%d (%d):0720 FCP command x%x residual "
401 "overrun error. Data: x%x x%x \n", 556 "overrun error. Data: x%x x%x \n",
402 phba->brd_no, cmnd->cmnd[0], 557 phba->brd_no, vpi, cmnd->cmnd[0],
403 cmnd->request_bufflen, cmnd->resid); 558 cmnd->request_bufflen, cmnd->resid);
404 host_status = DID_ERROR; 559 host_status = DID_ERROR;
405 560
@@ -410,11 +565,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
410 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 565 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
411 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 566 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
412 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 567 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
413 "%d:0734 FCP Read Check Error Data: " 568 "%d (%d):0734 FCP Read Check Error Data: "
414 "x%x x%x x%x x%x\n", phba->brd_no, 569 "x%x x%x x%x x%x\n",
415 be32_to_cpu(fcpcmd->fcpDl), 570 phba->brd_no, vpi,
416 be32_to_cpu(fcprsp->rspResId), 571 be32_to_cpu(fcpcmd->fcpDl),
417 fcpi_parm, cmnd->cmnd[0]); 572 be32_to_cpu(fcprsp->rspResId),
573 fcpi_parm, cmnd->cmnd[0]);
418 host_status = DID_ERROR; 574 host_status = DID_ERROR;
419 cmnd->resid = cmnd->request_bufflen; 575 cmnd->resid = cmnd->request_bufflen;
420 } 576 }
@@ -433,6 +589,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
433 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 589 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
434 struct lpfc_nodelist *pnode = rdata->pnode; 590 struct lpfc_nodelist *pnode = rdata->pnode;
435 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 591 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
592 uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
593 ? lpfc_cmd->cur_iocbq.vport->vpi
594 : 0);
436 int result; 595 int result;
437 struct scsi_device *sdev, *tmp_sdev; 596 struct scsi_device *sdev, *tmp_sdev;
438 int depth = 0; 597 int depth = 0;
@@ -448,11 +607,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
448 lpfc_cmd->status = IOSTAT_DEFAULT; 607 lpfc_cmd->status = IOSTAT_DEFAULT;
449 608
450 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 609 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
451 "%d:0729 FCP cmd x%x failed <%d/%d> status: " 610 "%d (%d):0729 FCP cmd x%x failed <%d/%d> "
452 "x%x result: x%x Data: x%x x%x\n", 611 "status: x%x result: x%x Data: x%x x%x\n",
453 phba->brd_no, cmd->cmnd[0], cmd->device->id, 612 phba->brd_no, vpi, cmd->cmnd[0],
454 cmd->device->lun, lpfc_cmd->status, 613 cmd->device ? cmd->device->id : 0xffff,
455 lpfc_cmd->result, pIocbOut->iocb.ulpContext, 614 cmd->device ? cmd->device->lun : 0xffff,
615 lpfc_cmd->status, lpfc_cmd->result,
616 pIocbOut->iocb.ulpContext,
456 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 617 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
457 618
458 switch (lpfc_cmd->status) { 619 switch (lpfc_cmd->status) {
@@ -464,6 +625,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
464 case IOSTAT_FABRIC_BSY: 625 case IOSTAT_FABRIC_BSY:
465 cmd->result = ScsiResult(DID_BUS_BUSY, 0); 626 cmd->result = ScsiResult(DID_BUS_BUSY, 0);
466 break; 627 break;
628 case IOSTAT_LOCAL_REJECT:
629 if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
630 lpfc_cmd->result == IOERR_NO_RESOURCES ||
631 lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
632 cmd->result = ScsiResult(DID_REQUEUE, 0);
633 break;
634 } /* else: fall through */
467 default: 635 default:
468 cmd->result = ScsiResult(DID_ERROR, 0); 636 cmd->result = ScsiResult(DID_ERROR, 0);
469 break; 637 break;
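The new IOSTAT_LOCAL_REJECT case above maps transient conditions (no resources, login required, temporarily unavailable) to DID_REQUEUE so the SCSI midlayer retries the command instead of failing it. The result word is built with the driver's ScsiResult() macro; a small stand-alone sketch of that packing, assuming the usual host-byte-in-bits-16..23 layout, with the status values reproduced locally only so the example compiles on its own:

	#include <stdio.h>

	/* Assumed shape of the driver's ScsiResult() macro. */
	#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | (scsi_code))

	#define DID_ERROR   0x07
	#define DID_REQUEUE 0x0d	/* midlayer should requeue and retry */

	int main(void)
	{
		unsigned int requeue = ScsiResult(DID_REQUEUE, 0);
		unsigned int error   = ScsiResult(DID_ERROR, 0);

		printf("requeue=0x%08x host_byte=0x%02x\n", requeue, (requeue >> 16) & 0xff);
		printf("error  =0x%08x host_byte=0x%02x\n", error,   (error >> 16) & 0xff);
		return 0;
	}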
@@ -480,9 +648,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
480 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 648 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
481 649
482 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 650 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
483 "%d:0710 Iodone <%d/%d> cmd %p, error x%x " 651 "%d (%d):0710 Iodone <%d/%d> cmd %p, error "
484 "SNS x%x x%x Data: x%x x%x\n", 652 "x%x SNS x%x x%x Data: x%x x%x\n",
485 phba->brd_no, cmd->device->id, 653 phba->brd_no, vpi, cmd->device->id,
486 cmd->device->lun, cmd, cmd->result, 654 cmd->device->lun, cmd, cmd->result,
487 *lp, *(lp + 3), cmd->retries, cmd->resid); 655 *lp, *(lp + 3), cmd->retries, cmd->resid);
488 } 656 }
@@ -497,6 +665,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
497 return; 665 return;
498 } 666 }
499 667
668
669 if (!result)
670 lpfc_rampup_queue_depth(phba, sdev);
671
500 if (!result && pnode != NULL && 672 if (!result && pnode != NULL &&
501 ((jiffies - pnode->last_ramp_up_time) > 673 ((jiffies - pnode->last_ramp_up_time) >
502 LPFC_Q_RAMP_UP_INTERVAL * HZ) && 674 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
@@ -545,8 +717,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
545 717
546 if (depth) { 718 if (depth) {
547 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 719 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
548 "%d:0711 detected queue full - lun queue depth " 720 "%d (%d):0711 detected queue full - "
549 " adjusted to %d.\n", phba->brd_no, depth); 721 "lun queue depth adjusted to %d.\n",
722 phba->brd_no, vpi, depth);
550 } 723 }
551 } 724 }
552 725
@@ -733,10 +906,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
733 906
734 /* Issue Target Reset to TGT <num> */ 907 /* Issue Target Reset to TGT <num> */
735 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 908 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
736 "%d:0702 Issue Target Reset to TGT %d " 909 "%d (%d):0702 Issue Target Reset to TGT %d "
737 "Data: x%x x%x\n", 910 "Data: x%x x%x\n",
738 phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, 911 phba->brd_no, vport->vpi, tgt_id,
739 rdata->pnode->nlp_flag); 912 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
740 913
741 ret = lpfc_sli_issue_iocb_wait(phba, 914 ret = lpfc_sli_issue_iocb_wait(phba,
742 &phba->sli.ring[phba->sli.fcp_ring], 915 &phba->sli.ring[phba->sli.fcp_ring],
@@ -842,9 +1015,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
842 } 1015 }
843 lpfc_cmd = lpfc_get_scsi_buf(phba); 1016 lpfc_cmd = lpfc_get_scsi_buf(phba);
844 if (lpfc_cmd == NULL) { 1017 if (lpfc_cmd == NULL) {
1018 lpfc_adjust_queue_depth(phba);
1019
845 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 1020 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
846 "%d:0707 driver's buffer pool is empty, " 1021 "%d (%d):0707 driver's buffer pool is empty, "
847 "IO busied\n", phba->brd_no); 1022 "IO busied\n",
1023 phba->brd_no, vport->vpi);
848 goto out_host_busy; 1024 goto out_host_busy;
849 } 1025 }
850 1026
@@ -865,7 +1041,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
865 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 1041 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
866 1042
867 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 1043 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
868 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 1044 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
869 if (err) 1045 if (err)
870 goto out_host_busy_free_buf; 1046 goto out_host_busy_free_buf;
871 1047
@@ -986,18 +1162,19 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
986 if (lpfc_cmd->pCmd == cmnd) { 1162 if (lpfc_cmd->pCmd == cmnd) {
987 ret = FAILED; 1163 ret = FAILED;
988 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1164 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
989 "%d:0748 abort handler timed out waiting for " 1165 "%d (%d):0748 abort handler timed out waiting "
990 "abort to complete: ret %#x, ID %d, LUN %d, " 1166 "for abort to complete: ret %#x, ID %d, "
991 "snum %#lx\n", 1167 "LUN %d, snum %#lx\n",
992 phba->brd_no, ret, cmnd->device->id, 1168 phba->brd_no, vport->vpi, ret,
993 cmnd->device->lun, cmnd->serial_number); 1169 cmnd->device->id, cmnd->device->lun,
1170 cmnd->serial_number);
994 } 1171 }
995 1172
996 out: 1173 out:
997 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1174 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
998 "%d:0749 SCSI Layer I/O Abort Request " 1175 "%d (%d):0749 SCSI Layer I/O Abort Request "
999 "Status x%x ID %d LUN %d snum %#lx\n", 1176 "Status x%x ID %d LUN %d snum %#lx\n",
1000 phba->brd_no, ret, cmnd->device->id, 1177 phba->brd_no, vport->vpi, ret, cmnd->device->id,
1001 cmnd->device->lun, cmnd->serial_number); 1178 cmnd->device->lun, cmnd->serial_number);
1002 1179
1003 return ret; 1180 return ret;
@@ -1024,7 +1201,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1024 * If target is not in a MAPPED state, delay the reset until 1201 * If target is not in a MAPPED state, delay the reset until
1025 * target is rediscovered or devloss timeout expires. 1202 * target is rediscovered or devloss timeout expires.
1026 */ 1203 */
1027 while ( 1 ) { 1204 while (1) {
1028 if (!pnode) 1205 if (!pnode)
1029 goto out; 1206 goto out;
1030 1207
@@ -1035,9 +1212,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1035 if (!rdata || 1212 if (!rdata ||
1036 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) { 1213 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
1037 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1214 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1038 "%d:0721 LUN Reset rport failure:" 1215 "%d (%d):0721 LUN Reset rport "
1039 " cnt x%x rdata x%p\n", 1216 "failure: cnt x%x rdata x%p\n",
1040 phba->brd_no, loopcnt, rdata); 1217 phba->brd_no, vport->vpi,
1218 loopcnt, rdata);
1041 goto out; 1219 goto out;
1042 } 1220 }
1043 pnode = rdata->pnode; 1221 pnode = rdata->pnode;
@@ -1068,8 +1246,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1068 goto out_free_scsi_buf; 1246 goto out_free_scsi_buf;
1069 1247
1070 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 1248 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1071 "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x " 1249 "%d (%d):0703 Issue target reset to TGT %d LUN %d "
1072 "nlp_flag x%x\n", phba->brd_no, cmnd->device->id, 1250 "rpi x%x nlp_flag x%x\n",
1251 phba->brd_no, vport->vpi, cmnd->device->id,
1073 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 1252 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1074 1253
1075 iocb_status = lpfc_sli_issue_iocb_wait(phba, 1254 iocb_status = lpfc_sli_issue_iocb_wait(phba,
@@ -1103,7 +1282,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1103 cmnd->device->id, cmnd->device->lun, 1282 cmnd->device->id, cmnd->device->lun,
1104 0, LPFC_CTX_LUN); 1283 0, LPFC_CTX_LUN);
1105 loopcnt = 0; 1284 loopcnt = 0;
1106 while (cnt) { 1285 while(cnt) {
1107 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1286 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1108 1287
1109 if (++loopcnt 1288 if (++loopcnt
@@ -1118,8 +1297,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1118 1297
1119 if (cnt) { 1298 if (cnt) {
1120 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1299 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1121 "%d:0719 device reset I/O flush failure: cnt x%x\n", 1300 "%d (%d):0719 device reset I/O flush failure: "
1122 phba->brd_no, cnt); 1301 "cnt x%x\n",
1302 phba->brd_no, vport->vpi, cnt);
1123 ret = FAILED; 1303 ret = FAILED;
1124 } 1304 }
1125 1305
@@ -1128,10 +1308,10 @@ out_free_scsi_buf:
1128 lpfc_release_scsi_buf(phba, lpfc_cmd); 1308 lpfc_release_scsi_buf(phba, lpfc_cmd);
1129 } 1309 }
1130 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1310 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1131 "%d:0713 SCSI layer issued device reset (%d, %d) " 1311 "%d (%d):0713 SCSI layer issued device reset (%d, %d) "
1132 "return x%x status x%x result x%x\n", 1312 "return x%x status x%x result x%x\n",
1133 phba->brd_no, cmnd->device->id, cmnd->device->lun, 1313 phba->brd_no, vport->vpi, cmnd->device->id,
1134 ret, cmd_status, cmd_result); 1314 cmnd->device->lun, ret, cmd_status, cmd_result);
1135 1315
1136out: 1316out:
1137 return ret; 1317 return ret;
@@ -1184,8 +1364,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1184 ndlp->rport->dd_data); 1364 ndlp->rport->dd_data);
1185 if (ret != SUCCESS) { 1365 if (ret != SUCCESS) {
1186 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1366 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1187 "%d:0700 Bus Reset on target %d failed\n", 1367 "%d (%d):0700 Bus Reset on target %d "
1188 phba->brd_no, i); 1368 "failed\n",
1369 phba->brd_no, vport->vpi, i);
1189 err_count++; 1370 err_count++;
1190 break; 1371 break;
1191 } 1372 }
@@ -1210,7 +1391,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1210 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 1391 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1211 0, 0, 0, LPFC_CTX_HOST); 1392 0, 0, 0, LPFC_CTX_HOST);
1212 loopcnt = 0; 1393 loopcnt = 0;
1213 while (cnt) { 1394 while(cnt) {
1214 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1395 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1215 1396
1216 if (++loopcnt 1397 if (++loopcnt
@@ -1224,16 +1405,15 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1224 1405
1225 if (cnt) { 1406 if (cnt) {
1226 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1407 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1227 "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n", 1408 "%d (%d):0715 Bus Reset I/O flush failure: "
1228 phba->brd_no, cnt, i); 1409 "cnt x%x left x%x\n",
1410 phba->brd_no, vport->vpi, cnt, i);
1229 ret = FAILED; 1411 ret = FAILED;
1230 } 1412 }
1231 1413
1232 lpfc_printf_log(phba, 1414 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1233 KERN_ERR, 1415 "%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
1234 LOG_FCP, 1416 phba->brd_no, vport->vpi, ret);
1235 "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
1236 phba->brd_no, ret);
1237out: 1417out:
1238 return ret; 1418 return ret;
1239} 1419}
@@ -1263,17 +1443,24 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1263 */ 1443 */
1264 total = phba->total_scsi_bufs; 1444 total = phba->total_scsi_bufs;
1265 num_to_alloc = phba->cfg_lun_queue_depth + 2; 1445 num_to_alloc = phba->cfg_lun_queue_depth + 2;
1266 if (total >= phba->cfg_hba_queue_depth) { 1446
1447 /* Allow some exchanges to be available always to complete discovery */
1448 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1267 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1449 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1268 "%d:0704 At limitation of %d preallocated " 1450 "%d (%d):0704 At limitation of %d "
1269 "command buffers\n", phba->brd_no, total); 1451 "preallocated command buffers\n",
1452 phba->brd_no, vport->vpi, total);
1270 return 0; 1453 return 0;
1271 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) { 1454
1455 /* Allow some exchanges to be available always to complete discovery */
1456 } else if (total + num_to_alloc >
1457 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1272 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1458 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1273 "%d:0705 Allocation request of %d command " 1459 "%d (%d):0705 Allocation request of %d "
1274 "buffers will exceed max of %d. Reducing " 1460 "command buffers will exceed max of %d. "
1275 "allocation request to %d.\n", phba->brd_no, 1461 "Reducing allocation request to %d.\n",
1276 num_to_alloc, phba->cfg_hba_queue_depth, 1462 phba->brd_no, vport->vpi, num_to_alloc,
1463 phba->cfg_hba_queue_depth,
1277 (phba->cfg_hba_queue_depth - total)); 1464 (phba->cfg_hba_queue_depth - total));
1278 num_to_alloc = phba->cfg_hba_queue_depth - total; 1465 num_to_alloc = phba->cfg_hba_queue_depth - total;
1279 } 1466 }
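The reworked limit check above keeps LPFC_DISC_IOCB_BUFF_COUNT exchanges in reserve so ELS/discovery traffic is never starved by SCSI buffer preallocation. A stand-alone sketch of the clamping decision, with an invented reserve value and limits standing in for cfg_hba_queue_depth and cfg_lun_queue_depth:

	#include <stdio.h>

	#define DISC_RESERVE 20		/* stand-in for LPFC_DISC_IOCB_BUFF_COUNT */

	/* How many new command buffers to preallocate for a LUN while leaving
	 * DISC_RESERVE exchanges free for discovery; mirrors the patched logic. */
	static int bufs_to_alloc(int total, int want, int hba_queue_depth)
	{
		if (total >= hba_queue_depth - DISC_RESERVE)
			return 0;				/* already at the limit */
		if (total + want > hba_queue_depth - DISC_RESERVE)
			return hba_queue_depth - total;		/* clamp the request */
		return want;
	}

	int main(void)
	{
		printf("%d\n", bufs_to_alloc(100, 34, 512));	/* room for all 34  */
		printf("%d\n", bufs_to_alloc(480, 34, 512));	/* clamped to 32    */
		printf("%d\n", bufs_to_alloc(500, 34, 512));	/* reserve hit -> 0 */
		return 0;
	}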
@@ -1282,8 +1469,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1282 scsi_buf = lpfc_new_scsi_buf(vport); 1469 scsi_buf = lpfc_new_scsi_buf(vport);
1283 if (!scsi_buf) { 1470 if (!scsi_buf) {
1284 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1471 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1285 "%d:0706 Failed to allocate command " 1472 "%d (%d):0706 Failed to allocate "
1286 "buffer\n", phba->brd_no); 1473 "command buffer\n",
1474 phba->brd_no, vport->vpi);
1287 break; 1475 break;
1288 } 1476 }
1289 1477
@@ -1331,6 +1519,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
1331 return; 1519 return;
1332} 1520}
1333 1521
1522
1334struct scsi_host_template lpfc_template = { 1523struct scsi_host_template lpfc_template = {
1335 .module = THIS_MODULE, 1524 .module = THIS_MODULE,
1336 .name = LPFC_DRIVER_NAME, 1525 .name = LPFC_DRIVER_NAME,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 6e0b42bcebe7..a2927dc3161f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -44,14 +44,15 @@
44 * This allows multiple uses of lpfc_msgBlk0311 44 * This allows multiple uses of lpfc_msgBlk0311
45 * w/o perturbing log msg utility. 45 * w/o perturbing log msg utility.
46 */ 46 */
47#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \ 47#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
48 lpfc_printf_log(phba, \ 48 lpfc_printf_log(phba, \
49 KERN_INFO, \ 49 KERN_INFO, \
50 LOG_MBOX | LOG_SLI, \ 50 LOG_MBOX | LOG_SLI, \
51 "%d:0311 Mailbox command x%x cannot issue " \ 51 "%d (%d):0311 Mailbox command x%x cannot " \
52 "Data: x%x x%x x%x\n", \ 52 "issue Data: x%x x%x x%x\n", \
53 phba->brd_no, \ 53 phba->brd_no, \
54 mb->mbxCommand, \ 54 pmbox->vport ? pmbox->vport->vpi : 0, \
55 pmbox->mb.mbxCommand, \
55 phba->pport->port_state, \ 56 phba->pport->port_state, \
56 psli->sli_flag, \ 57 psli->sli_flag, \
57 flag) 58 flag)
@@ -65,11 +66,10 @@ typedef enum _lpfc_iocb_type {
65 LPFC_ABORT_IOCB 66 LPFC_ABORT_IOCB
66} lpfc_iocb_type; 67} lpfc_iocb_type;
67 68
68/* 69 /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
69 * SLI-2/SLI-3 provide different sized iocbs. Given a pointer to the start of 70 * to the start of the ring, and the slot number of the
70 * the ring, and the slot number of the desired iocb entry, calc a pointer to 71 * desired iocb entry, calc a pointer to that entry.
71 * that entry. 72 */
72 */
73static inline IOCB_t * 73static inline IOCB_t *
74lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 74lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
75{ 75{
@@ -229,13 +229,11 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
229 lpfc_config_ring(phba, i, pmb); 229 lpfc_config_ring(phba, i, pmb);
230 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 230 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
231 if (rc != MBX_SUCCESS) { 231 if (rc != MBX_SUCCESS) {
232 lpfc_printf_log(phba, 232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
233 KERN_ERR, 233 "%d:0446 Adapter failed to init (%d), "
234 LOG_INIT,
235 "%d:0446 Adapter failed to init, "
236 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 234 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
237 "ring %d\n", 235 "ring %d\n",
238 phba->brd_no, 236 phba->brd_no, rc,
239 pmbox->mbxCommand, 237 pmbox->mbxCommand,
240 pmbox->mbxStatus, 238 pmbox->mbxStatus,
241 i); 239 i);
@@ -254,9 +252,16 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
254{ 252{
255 list_add_tail(&piocb->list, &pring->txcmplq); 253 list_add_tail(&piocb->list, &pring->txcmplq);
256 pring->txcmplq_cnt++; 254 pring->txcmplq_cnt++;
257 if (unlikely(pring->ringno == LPFC_ELS_RING)) 255 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
258 mod_timer(&piocb->vport->els_tmofunc, 256 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
259 jiffies + HZ * (phba->fc_ratov << 1)); 257 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
258 if (!piocb->vport)
259 BUG();
260 else
261 mod_timer(&piocb->vport->els_tmofunc,
262 jiffies + HZ * (phba->fc_ratov << 1));
263 }
264
260 265
261 return 0; 266 return 0;
262} 267}
@@ -311,8 +316,10 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
311 */ 316 */
312 phba->work_ha |= HA_ERATT; 317 phba->work_ha |= HA_ERATT;
313 phba->work_hs = HS_FFER3; 318 phba->work_hs = HS_FFER3;
319
320 /* hbalock should already be held */
314 if (phba->work_wait) 321 if (phba->work_wait)
315 wake_up(phba->work_wait); 322 lpfc_worker_wake_up(phba);
316 323
317 return NULL; 324 return NULL;
318 } 325 }
@@ -399,7 +406,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
399 /* 406 /*
400 * Issue iocb command to adapter 407 * Issue iocb command to adapter
401 */ 408 */
402 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t)); 409 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
403 wmb(); 410 wmb();
404 pring->stats.iocb_cmd++; 411 pring->stats.iocb_cmd++;
405 412
@@ -520,14 +527,14 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
520 hbqp->next_hbqPutIdx = 0; 527 hbqp->next_hbqPutIdx = 0;
521 528
522 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 529 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
523 uint32_t raw_index = readl(&phba->hbq_get[hbqno]); 530 uint32_t raw_index = phba->hbq_get[hbqno];
524 uint32_t getidx = le32_to_cpu(raw_index); 531 uint32_t getidx = le32_to_cpu(raw_index);
525 532
526 hbqp->local_hbqGetIdx = getidx; 533 hbqp->local_hbqGetIdx = getidx;
527 534
528 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 535 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
529 lpfc_printf_log(phba, KERN_ERR, 536 lpfc_printf_log(phba, KERN_ERR,
530 LOG_SLI, 537 LOG_SLI | LOG_VPORT,
531 "%d:1802 HBQ %d: local_hbqGetIdx " 538 "%d:1802 HBQ %d: local_hbqGetIdx "
532 "%u is > than hbqp->entry_count %u\n", 539 "%u is > than hbqp->entry_count %u\n",
533 phba->brd_no, hbqno, 540 phba->brd_no, hbqno,
@@ -548,117 +555,121 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
548void 555void
549lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 556lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
550{ 557{
551 uint32_t i; 558 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
559 struct hbq_dmabuf *hbq_buf;
552 560
553 if (!phba->hbq_buffer_pool)
554 return;
555 /* Return all memory used by all HBQs */ 561 /* Return all memory used by all HBQs */
556 for (i = 0; i < phba->hbq_buffer_count; i++) { 562 list_for_each_entry_safe(dmabuf, next_dmabuf,
557 lpfc_hbq_free(phba, phba->hbq_buffer_pool[i].dbuf.virt, 563 &phba->hbq_buffer_list, list) {
558 phba->hbq_buffer_pool[i].dbuf.phys); 564 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
565 list_del(&hbq_buf->dbuf.list);
566 lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
567 kfree(hbq_buf);
559 } 568 }
560 kfree(phba->hbq_buffer_pool);
561 phba->hbq_buffer_pool = NULL;
562} 569}
563 570
564static void 571static void
565lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 572lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
566 struct hbq_dmabuf *hbq_buf_desc) 573 struct hbq_dmabuf *hbq_buf)
567{ 574{
568 struct lpfc_hbq_entry *hbqe; 575 struct lpfc_hbq_entry *hbqe;
576 dma_addr_t physaddr = hbq_buf->dbuf.phys;
569 577
570 /* Get next HBQ entry slot to use */ 578 /* Get next HBQ entry slot to use */
571 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 579 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
572 if (hbqe) { 580 if (hbqe) {
573 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 581 struct hbq_s *hbqp = &phba->hbqs[hbqno];
574 582
575 hbqe->bde.addrHigh = putPaddrHigh(hbq_buf_desc->dbuf.phys); 583 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
576 hbqe->bde.addrLow = putPaddrLow(hbq_buf_desc->dbuf.phys); 584 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
577 hbqe->bde.tus.f.bdeSize = FCELSSIZE; 585 hbqe->bde.tus.f.bdeSize = FCELSSIZE;
578 hbqe->bde.tus.f.bdeFlags = 0; 586 hbqe->bde.tus.f.bdeFlags = 0;
579 hbqe->buffer_tag = hbq_buf_desc->tag; 587 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
580 /* Sync SLIM */ 588 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
589 /* Sync SLIM */
581 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 590 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
582 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 591 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
583 /* flush */ 592 /* flush */
584 readl(phba->hbq_put + hbqno); 593 readl(phba->hbq_put + hbqno);
585 phba->hbq_buff_count++; 594 list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
586 } 595 }
587} 596}
588 597
589static void 598static struct lpfc_hbq_init lpfc_els_hbq = {
590lpfc_sli_fill_hbq(struct lpfc_hba *phba, uint32_t hbqno, uint32_t buffer_index) 599 .rn = 1,
591{ 600 .entry_count = 200,
592 struct hbq_dmabuf *hbq_buf_desc; 601 .mask_count = 0,
593 uint32_t i; 602 .profile = 0,
603 .ring_mask = 1 << LPFC_ELS_RING,
604 .buffer_count = 0,
605 .init_count = 20,
606 .add_count = 5,
607};
594 608
595 for (i = 0; i < phba->hbqs[hbqno].entry_count; i++) { 609static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
596 /* Search hbqbufq, from the begining, 610 &lpfc_els_hbq,
597 * looking for an unused entry 611};
598 */
599 phba->hbq_buffer_pool[buffer_index + i].tag |= hbqno << 16;
600 hbq_buf_desc = phba->hbq_buffer_pool + buffer_index + i;
601 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf_desc);
602 }
603}
604 612
605int 613int
606lpfc_sli_hbqbuf_fill_hbq(struct lpfc_hba *phba) 614lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
607{ 615{
608 return 0; 616 uint32_t i, start, end;
609} 617 struct hbq_dmabuf *hbq_buffer;
610 618
611static int 619 start = lpfc_hbq_defs[hbqno]->buffer_count;
612lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba) 620 end = count + lpfc_hbq_defs[hbqno]->buffer_count;
613{ 621 if (end > lpfc_hbq_defs[hbqno]->entry_count) {
614 uint32_t buffer_index = 0; 622 end = lpfc_hbq_defs[hbqno]->entry_count;
615 uint32_t hbqno; 623 }
616 624
617 /* Populate HBQ entries */ 625 /* Populate HBQ entries */
618 for (hbqno = 0; hbqno < phba->hbq_count; ++hbqno) { 626 for (i = start; i < end; i++) {
619 /* Find ring associated with HBQ */ 627 hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
620 628 GFP_KERNEL);
621 lpfc_sli_fill_hbq(phba, hbqno, buffer_index); 629 if (!hbq_buffer)
622 buffer_index += phba->hbqs[hbqno].entry_count; 630 return 1;
631 hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
632 &hbq_buffer->dbuf.phys);
633 if (hbq_buffer->dbuf.virt == NULL)
634 return 1;
635 hbq_buffer->tag = (i | (hbqno << 16));
636 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
637 lpfc_hbq_defs[hbqno]->buffer_count++;
623 } 638 }
624 return 0; 639 return 0;
625} 640}
626 641
627struct hbq_dmabuf * 642int
628lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 643lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
629{ 644{
630 if ((tag & 0xffff) < phba->hbq_buffer_count) 645 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
631 return phba->hbq_buffer_pool + (tag & 0xffff); 646 lpfc_hbq_defs[qno]->add_count));
647}
632 648
633 lpfc_printf_log(phba, KERN_ERR, 649int
634 LOG_SLI, 650lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
635 "%d:1803 Bad hbq tag. Data: x%x x%x\n", 651{
636 phba->brd_no, tag, 652 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
637 phba->hbq_buffer_count); 653 lpfc_hbq_defs[qno]->init_count));
638 return NULL;
639} 654}
640 655
641void 656struct hbq_dmabuf *
642lpfc_sli_hbqbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t phys) 657lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
643{ 658{
644 uint32_t i, hbqno; 659 struct lpfc_dmabuf *d_buf;
660 struct hbq_dmabuf *hbq_buf;
645 661
646 for (i = 0; i < phba->hbq_buffer_count; i++) { 662 list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
647 /* Search hbqbufq, from the begining, looking for a match on 663 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
648 phys */ 664 if ((hbq_buf->tag & 0xffff) == tag) {
649 if (phba->hbq_buffer_pool[i].dbuf.phys == phys) { 665 return hbq_buf;
650 hbqno = phba->hbq_buffer_pool[i].tag >> 16;
651 lpfc_sli_hbq_to_firmware(phba, hbqno,
652 phba->hbq_buffer_pool + i);
653 return;
654 } 666 }
655 } 667 }
656 668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
657 lpfc_printf_log(phba, KERN_ERR, 669 "%d:1803 Bad hbq tag. Data: x%x x%x\n",
658 LOG_SLI, 670 phba->brd_no, tag,
659 "%d:1804 Cannot find virtual addr for " 671 lpfc_hbq_defs[tag >> 16]->buffer_count);
660 "mapped buf. Data x%llx\n", 672 return NULL;
661 phba->brd_no, (unsigned long long) phys);
662} 673}
663 674
664void 675void
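The rewritten HBQ code above tags every posted buffer with its index in the low 16 bits and the HBQ number in the high 16 bits (tag = i | (hbqno << 16)); lpfc_sli_hbqbuf_find() masks with 0xffff and the error path shifts by 16 to recover the two halves. A tiny stand-alone sketch of that encoding, with names invented for the example:

	#include <stdio.h>
	#include <stdint.h>

	/* Pack and unpack an HBQ buffer tag: low 16 bits = buffer index,
	 * high 16 bits = HBQ number. */
	static uint32_t hbq_tag(uint32_t hbqno, uint32_t index) { return index | (hbqno << 16); }
	static uint32_t hbq_no(uint32_t tag)                    { return tag >> 16; }
	static uint32_t hbq_index(uint32_t tag)                 { return tag & 0xffff; }

	int main(void)
	{
		uint32_t tag = hbq_tag(1, 37);

		printf("tag=0x%08x hbqno=%u index=%u\n", tag, hbq_no(tag), hbq_index(tag));
		return 0;
	}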
@@ -723,6 +734,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
723 case MBX_FLASH_WR_ULA: 734 case MBX_FLASH_WR_ULA:
724 case MBX_SET_DEBUG: 735 case MBX_SET_DEBUG:
725 case MBX_LOAD_EXP_ROM: 736 case MBX_LOAD_EXP_ROM:
737 case MBX_REG_VPI:
738 case MBX_UNREG_VPI:
726 ret = mbxCommand; 739 ret = mbxCommand;
727 break; 740 break;
728 default: 741 default:
@@ -770,8 +783,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
770 !pmb->mb.mbxStatus) { 783 !pmb->mb.mbxStatus) {
771 784
772 rpi = pmb->mb.un.varWords[0]; 785 rpi = pmb->mb.un.varWords[0];
773 lpfc_unreg_login(phba, rpi, pmb); 786 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
774 pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 787 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
775 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 788 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
776 if (rc != MBX_NOT_FINISHED) 789 if (rc != MBX_NOT_FINISHED)
777 return; 790 return;
@@ -784,60 +797,25 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
784int 797int
785lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 798lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
786{ 799{
787 MAILBOX_t *mbox, *pmbox; 800 MAILBOX_t *pmbox;
788 LPFC_MBOXQ_t *pmb; 801 LPFC_MBOXQ_t *pmb;
789 int i, rc; 802 int rc;
790 uint32_t process_next; 803 LIST_HEAD(cmplq);
791 unsigned long iflags;
792
793 /* We should only get here if we are in SLI2 mode */
794 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
795 return 1;
796 }
797 804
798 phba->sli.slistat.mbox_event++; 805 phba->sli.slistat.mbox_event++;
799 806
800 /* Get a Mailbox buffer to setup mailbox commands for callback */ 807 /* Get all completed mailbox buffers into the cmplq */
801 if ((pmb = phba->sli.mbox_active)) { 808 spin_lock_irq(&phba->hbalock);
802 pmbox = &pmb->mb; 809 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
803 mbox = &phba->slim2p->mbx; 810 spin_unlock_irq(&phba->hbalock);
804
805 /* First check out the status word */
806 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
807
808 /* Sanity check to ensure the host owns the mailbox */
809 if (pmbox->mbxOwner != OWN_HOST) {
810 /* Lets try for a while */
811 for (i = 0; i < 10240; i++) {
812 /* First copy command data */
813 lpfc_sli_pcimem_bcopy(mbox, pmbox,
814 sizeof (uint32_t));
815 if (pmbox->mbxOwner == OWN_HOST)
816 goto mbout;
817 }
818 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
819 <status> */
820 lpfc_printf_log(phba,
821 KERN_WARNING,
822 LOG_MBOX | LOG_SLI,
823 "%d:0304 Stray Mailbox Interrupt "
824 "mbxCommand x%x mbxStatus x%x\n",
825 phba->brd_no,
826 pmbox->mbxCommand,
827 pmbox->mbxStatus);
828
829 spin_lock_irq(&phba->hbalock);
830 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
831 spin_unlock_irq(&phba->hbalock);
832 return 1;
833 }
834 811
835 mbout: 812 /* Get a Mailbox buffer to setup mailbox commands for callback */
836 del_timer_sync(&phba->sli.mbox_tmo); 813 do {
814 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
815 if (pmb == NULL)
816 break;
837 817
838 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 818 pmbox = &pmb->mb;
839 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
840 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
841 819
842 /* 820 /*
843 * It is a fatal error if unknown mbox command completion. 821 * It is a fatal error if unknown mbox command completion.
@@ -846,33 +824,33 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
846 MBX_SHUTDOWN) { 824 MBX_SHUTDOWN) {
847 825
848 /* Unknown mailbox command compl */ 826 /* Unknown mailbox command compl */
849 lpfc_printf_log(phba, 827 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
850 KERN_ERR, 828 "%d (%d):0323 Unknown Mailbox command "
851 LOG_MBOX | LOG_SLI, 829 "%x Cmpl\n",
852 "%d:0323 Unknown Mailbox command %x Cmpl\n", 830 phba->brd_no,
853 phba->brd_no, 831 pmb->vport ? pmb->vport->vpi : 0,
854 pmbox->mbxCommand); 832 pmbox->mbxCommand);
855 phba->link_state = LPFC_HBA_ERROR; 833 phba->link_state = LPFC_HBA_ERROR;
856 phba->work_hs = HS_FFER3; 834 phba->work_hs = HS_FFER3;
857 lpfc_handle_eratt(phba); 835 lpfc_handle_eratt(phba);
858 return 0; 836 continue;
859 } 837 }
860 838
861 phba->sli.mbox_active = NULL;
862 if (pmbox->mbxStatus) { 839 if (pmbox->mbxStatus) {
863 phba->sli.slistat.mbox_stat_err++; 840 phba->sli.slistat.mbox_stat_err++;
864 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 841 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
865 /* Mbox cmd cmpl error - RETRYing */ 842 /* Mbox cmd cmpl error - RETRYing */
866 lpfc_printf_log(phba, 843 lpfc_printf_log(phba, KERN_INFO,
867 KERN_INFO, 844 LOG_MBOX | LOG_SLI,
868 LOG_MBOX | LOG_SLI, 845 "%d (%d):0305 Mbox cmd cmpl "
869 "%d:0305 Mbox cmd cmpl error - " 846 "error - RETRYing Data: x%x "
870 "RETRYing Data: x%x x%x x%x x%x\n", 847 "x%x x%x x%x\n",
871 phba->brd_no, 848 phba->brd_no,
872 pmbox->mbxCommand, 849 pmb->vport ? pmb->vport->vpi :0,
873 pmbox->mbxStatus, 850 pmbox->mbxCommand,
874 pmbox->un.varWords[0], 851 pmbox->mbxStatus,
875 phba->pport->port_state); 852 pmbox->un.varWords[0],
853 pmb->vport->port_state);
876 pmbox->mbxStatus = 0; 854 pmbox->mbxStatus = 0;
877 pmbox->mbxOwner = OWN_HOST; 855 pmbox->mbxOwner = OWN_HOST;
878 spin_lock_irq(&phba->hbalock); 856 spin_lock_irq(&phba->hbalock);
@@ -880,17 +858,16 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
880 spin_unlock_irq(&phba->hbalock); 858 spin_unlock_irq(&phba->hbalock);
881 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 859 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
882 if (rc == MBX_SUCCESS) 860 if (rc == MBX_SUCCESS)
883 return 0; 861 continue;
884 } 862 }
885 } 863 }
886 864
887 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 865 /* Mailbox cmd <cmd> Cmpl <cmpl> */
888 lpfc_printf_log(phba, 866 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
889 KERN_INFO, 867 "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
890 LOG_MBOX | LOG_SLI,
891 "%d:0307 Mailbox cmd x%x Cmpl x%p "
892 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 868 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
893 phba->brd_no, 869 phba->brd_no,
870 pmb->vport ? pmb->vport->vpi : 0,
894 pmbox->mbxCommand, 871 pmbox->mbxCommand,
895 pmb->mbox_cmpl, 872 pmb->mbox_cmpl,
896 *((uint32_t *) pmbox), 873 *((uint32_t *) pmbox),
@@ -903,39 +880,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
903 pmbox->un.varWords[6], 880 pmbox->un.varWords[6],
904 pmbox->un.varWords[7]); 881 pmbox->un.varWords[7]);
905 882
906 if (pmb->mbox_cmpl) { 883 if (pmb->mbox_cmpl)
907 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
908 pmb->mbox_cmpl(phba,pmb); 884 pmb->mbox_cmpl(phba,pmb);
909 } 885 } while (1);
910 } 886 return 0;
911 887}
912
913 do {
914 process_next = 0; /* by default don't loop */
915 spin_lock_irq(&phba->hbalock);
916 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
917
918 /* Process next mailbox command if there is one */
919 if ((pmb = lpfc_mbox_get(phba))) {
920 spin_unlock_irq(&phba->hbalock);
921 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
922 if (rc == MBX_NOT_FINISHED) {
923 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
924 pmb->mbox_cmpl(phba,pmb);
925 process_next = 1;
926 continue; /* loop back */
927 }
928 } else {
929 spin_unlock_irq(&phba->hbalock);
930 /* Turn on IOCB processing */
931 for (i = 0; i < phba->sli.num_rings; i++)
932 lpfc_sli_turn_on_ring(phba, i);
933 }
934 888
935 } while (process_next); 889static struct lpfc_dmabuf *
890lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
891{
892 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
936 893
937 return 0; 894 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
895 if (hbq_entry == NULL)
896 return NULL;
897 list_del(&hbq_entry->dbuf.list);
898 new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
899 if (new_hbq_entry == NULL)
900 return &hbq_entry->dbuf;
901 new_hbq_entry->dbuf = hbq_entry->dbuf;
902 new_hbq_entry->tag = -1;
903 hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
904 if (hbq_entry->dbuf.virt == NULL) {
905 kfree(new_hbq_entry);
906 return &hbq_entry->dbuf;
907 }
908 lpfc_sli_free_hbq(phba, hbq_entry);
909 return &new_hbq_entry->dbuf;
938} 910}
911
939static int 912static int
940lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 913lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
941 struct lpfc_iocbq *saveq) 914 struct lpfc_iocbq *saveq)
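The rewritten mailbox handler above drains sli.mboxq_cmpl by splicing the whole list onto a private list head while holding hbalock, then walks the private list with the lock dropped so completion callbacks never run under the spinlock. A rough user-space analogue of that drain pattern, using pthreads and a toy singly linked list (illustrative only, not the kernel list API):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct cmpl { int id; struct cmpl *next; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct cmpl *cmplq;		/* shared completion list */

	static void post_completion(int id)
	{
		struct cmpl *c = malloc(sizeof(*c));

		c->id = id;
		pthread_mutex_lock(&lock);
		c->next = cmplq;		/* push under the lock */
		cmplq = c;
		pthread_mutex_unlock(&lock);
	}

	static void handle_completions(void)
	{
		struct cmpl *list, *c;

		pthread_mutex_lock(&lock);
		list = cmplq;			/* splice the list off ...       */
		cmplq = NULL;			/* ... and reinitialise the head */
		pthread_mutex_unlock(&lock);

		while ((c = list) != NULL) {	/* callbacks run unlocked */
			list = c->next;
			printf("completing mailbox %d\n", c->id);
			free(c);
		}
	}

	int main(void)
	{
		post_completion(1);
		post_completion(2);
		handle_completions();
		return 0;
	}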
@@ -962,14 +935,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
962 935
963 /* Firmware Workaround */ 936 /* Firmware Workaround */
964 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 937 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
965 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 938 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
966 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 939 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
967 Rctl = FC_ELS_REQ; 940 Rctl = FC_ELS_REQ;
968 Type = FC_ELS_DATA; 941 Type = FC_ELS_DATA;
969 w5p->hcsw.Rctl = Rctl; 942 w5p->hcsw.Rctl = Rctl;
970 w5p->hcsw.Type = Type; 943 w5p->hcsw.Type = Type;
971 } 944 }
972 } 945 }
946
947 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
948 if (irsp->ulpBdeCount != 0)
949 saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
950 irsp->un.ulpWord[3]);
951 if (irsp->ulpBdeCount == 2)
952 saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
953 irsp->un.ulpWord[15]);
954 }
955
973 /* unSolicited Responses */ 956 /* unSolicited Responses */
974 if (pring->prt[0].profile) { 957 if (pring->prt[0].profile) {
975 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 958 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -997,17 +980,15 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
997 /* Unexpected Rctl / Type received */ 980 /* Unexpected Rctl / Type received */
998 /* Ring <ringno> handler: unexpected 981 /* Ring <ringno> handler: unexpected
999 Rctl <Rctl> Type <Type> received */ 982 Rctl <Rctl> Type <Type> received */
1000 lpfc_printf_log(phba, 983 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1001 KERN_WARNING,
1002 LOG_SLI,
1003 "%d:0313 Ring %d handler: unexpected Rctl x%x " 984 "%d:0313 Ring %d handler: unexpected Rctl x%x "
1004 "Type x%x received \n", 985 "Type x%x received\n",
1005 phba->brd_no, 986 phba->brd_no,
1006 pring->ringno, 987 pring->ringno,
1007 Rctl, 988 Rctl,
1008 Type); 989 Type);
1009 } 990 }
1010 return(1); 991 return 1;
1011} 992}
1012 993
1013static struct lpfc_iocbq * 994static struct lpfc_iocbq *
@@ -1022,7 +1003,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1022 1003
1023 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 1004 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1024 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 1005 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1025 list_del(&cmd_iocb->list); 1006 list_del_init(&cmd_iocb->list);
1026 pring->txcmplq_cnt--; 1007 pring->txcmplq_cnt--;
1027 return cmd_iocb; 1008 return cmd_iocb;
1028 } 1009 }
@@ -1079,18 +1060,18 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1079 * Ring <ringno> handler: unexpected completion IoTag 1060 * Ring <ringno> handler: unexpected completion IoTag
1080 * <IoTag> 1061 * <IoTag>
1081 */ 1062 */
1082 lpfc_printf_log(phba, 1063 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1083 KERN_WARNING, 1064 "%d (%d):0322 Ring %d handler: "
1084 LOG_SLI, 1065 "unexpected completion IoTag x%x "
1085 "%d:0322 Ring %d handler: unexpected " 1066 "Data: x%x x%x x%x x%x\n",
1086 "completion IoTag x%x Data: x%x x%x x%x x%x\n", 1067 phba->brd_no,
1087 phba->brd_no, 1068 cmdiocbp->vport->vpi,
1088 pring->ringno, 1069 pring->ringno,
1089 saveq->iocb.ulpIoTag, 1070 saveq->iocb.ulpIoTag,
1090 saveq->iocb.ulpStatus, 1071 saveq->iocb.ulpStatus,
1091 saveq->iocb.un.ulpWord[4], 1072 saveq->iocb.un.ulpWord[4],
1092 saveq->iocb.ulpCommand, 1073 saveq->iocb.ulpCommand,
1093 saveq->iocb.ulpContext); 1074 saveq->iocb.ulpContext);
1094 } 1075 }
1095 } 1076 }
1096 1077
@@ -1103,7 +1084,6 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1103 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1084 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1104 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 1085 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1105 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1086 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1106
1107 /* 1087 /*
1108 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 1088 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1109 * rsp ring <portRspMax> 1089 * rsp ring <portRspMax>
@@ -1123,8 +1103,10 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1123 */ 1103 */
1124 phba->work_ha |= HA_ERATT; 1104 phba->work_ha |= HA_ERATT;
1125 phba->work_hs = HS_FFER3; 1105 phba->work_hs = HS_FFER3;
1106
1107 /* hbalock should already be held */
1126 if (phba->work_wait) 1108 if (phba->work_wait)
1127 wake_up(phba->work_wait); 1109 lpfc_worker_wake_up(phba);
1128 1110
1129 return; 1111 return;
1130} 1112}
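[Editor's note] The hunk above replaces a bare wake_up(phba->work_wait) with lpfc_worker_wake_up() and documents that hbalock must already be held at that point. Below is a minimal userspace sketch of that "record pending work under the lock, then signal the worker" shape, using pthreads; the names (worker_ctx, post_work) are invented for illustration and are not lpfc APIs.

#include <pthread.h>
#include <stdio.h>

struct worker_ctx {
	pthread_mutex_t lock;    /* stands in for phba->hbalock          */
	pthread_cond_t  wake;    /* stands in for the worker wait object */
	unsigned int    work_ha; /* pending attention bits               */
};

/* Caller must hold ctx->lock, mirroring the "hbalock should already
 * be held" comment in the patch. */
static void post_work(struct worker_ctx *ctx, unsigned int events)
{
	ctx->work_ha |= events;          /* record what needs servicing */
	pthread_cond_signal(&ctx->wake); /* then wake the worker thread */
}

int main(void)
{
	struct worker_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
		.work_ha = 0,
	};

	pthread_mutex_lock(&ctx.lock);
	post_work(&ctx, 0x8);            /* e.g. an error-attention bit */
	pthread_mutex_unlock(&ctx.lock);

	printf("pending work bits: 0x%x\n", ctx.work_ha);
	return 0;
}

Signaling while the lock is held keeps the flag update and the wake-up atomic with respect to the worker re-checking its condition.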
@@ -1171,7 +1153,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1171 1153
1172 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1154 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1173 (uint32_t *) &rspiocbq.iocb, 1155 (uint32_t *) &rspiocbq.iocb,
1174 sizeof(IOCB_t)); 1156 phba->iocb_rsp_size);
1175 irsp = &rspiocbq.iocb; 1157 irsp = &rspiocbq.iocb;
1176 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1158 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1177 pring->stats.iocb_rsp++; 1159 pring->stats.iocb_rsp++;
@@ -1342,16 +1324,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1342 rsp_cmpl++; 1324 rsp_cmpl++;
1343 1325
1344 if (unlikely(irsp->ulpStatus)) { 1326 if (unlikely(irsp->ulpStatus)) {
1327 /*
1328 * If resource errors reported from HBA, reduce
1329 * queuedepths of the SCSI device.
1330 */
1331 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1332 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1333 spin_unlock_irqrestore(&phba->hbalock, iflag);
1334 lpfc_adjust_queue_depth(phba);
1335 spin_lock_irqsave(&phba->hbalock, iflag);
1336 }
1337
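[Editor's note] The added block drops hbalock, calls lpfc_adjust_queue_depth(), and re-takes the lock before continuing the response loop, as the comment in the hunk explains. A small userspace sketch of that unlock/call/relock pattern follows; the assumption (stated by the patch comment) is only that the helper must not run under the fast-path lock, and every name here is illustrative.

#include <pthread.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_depth = 32;

/* Heavier helper that must not run under ring_lock (it may take other
 * locks or run for a while), analogous to lpfc_adjust_queue_depth(). */
static void adjust_queue_depth(void)
{
	if (queue_depth > 1)
		queue_depth--;
}

static void handle_response(int resource_error)
{
	pthread_mutex_lock(&ring_lock);

	/* ... walk response entries under the lock ... */

	if (resource_error) {
		/* Drop the lock, do the slow adjustment, re-take the lock
		 * and continue exactly where the loop left off. */
		pthread_mutex_unlock(&ring_lock);
		adjust_queue_depth();
		pthread_mutex_lock(&ring_lock);
	}

	/* ... keep processing entries ... */

	pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
	handle_response(1);
	return 0;
}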
1345 /* Rsp ring <ringno> error: IOCB */ 1338 /* Rsp ring <ringno> error: IOCB */
1346 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1339 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1347 "%d:0336 Rsp Ring %d error: IOCB Data: " 1340 "%d:0336 Rsp Ring %d error: IOCB Data: "
1348 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1341 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1349 phba->brd_no, pring->ringno, 1342 phba->brd_no, pring->ringno,
1350 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1343 irsp->un.ulpWord[0],
1351 irsp->un.ulpWord[2], irsp->un.ulpWord[3], 1344 irsp->un.ulpWord[1],
1352 irsp->un.ulpWord[4], irsp->un.ulpWord[5], 1345 irsp->un.ulpWord[2],
1353 *(((uint32_t *) irsp) + 6), 1346 irsp->un.ulpWord[3],
1354 *(((uint32_t *) irsp) + 7)); 1347 irsp->un.ulpWord[4],
1348 irsp->un.ulpWord[5],
1349 *(((uint32_t *) irsp) + 6),
1350 *(((uint32_t *) irsp) + 7));
1355 } 1351 }
1356 1352
1357 switch (type) { 1353 switch (type) {
@@ -1365,7 +1361,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1365 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1361 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1366 "%d:0333 IOCB cmd 0x%x" 1362 "%d:0333 IOCB cmd 0x%x"
1367 " processed. Skipping" 1363 " processed. Skipping"
1368 " completion\n", phba->brd_no, 1364 " completion\n",
1365 phba->brd_no,
1369 irsp->ulpCommand); 1366 irsp->ulpCommand);
1370 break; 1367 break;
1371 } 1368 }
@@ -1402,11 +1399,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1402 } else { 1399 } else {
1403 /* Unknown IOCB command */ 1400 /* Unknown IOCB command */
1404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1405 "%d:0334 Unknown IOCB command " 1402 "%d:0334 Unknown IOCB command "
1406 "Data: x%x, x%x x%x x%x x%x\n", 1403 "Data: x%x, x%x x%x x%x x%x\n",
1407 phba->brd_no, type, irsp->ulpCommand, 1404 phba->brd_no, type,
1408 irsp->ulpStatus, irsp->ulpIoTag, 1405 irsp->ulpCommand,
1409 irsp->ulpContext); 1406 irsp->ulpStatus,
1407 irsp->ulpIoTag,
1408 irsp->ulpContext);
1410 } 1409 }
1411 break; 1410 break;
1412 } 1411 }
@@ -1446,7 +1445,6 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1446 return rc; 1445 return rc;
1447} 1446}
1448 1447
1449
1450int 1448int
1451lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 1449lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1452 struct lpfc_sli_ring *pring, uint32_t mask) 1450 struct lpfc_sli_ring *pring, uint32_t mask)
@@ -1484,8 +1482,8 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1484 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1482 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1485 "%d:0303 Ring %d handler: portRspPut %d " 1483 "%d:0303 Ring %d handler: portRspPut %d "
1486 "is bigger then rsp ring %d\n", 1484 "is bigger then rsp ring %d\n",
1487 phba->brd_no, 1485 phba->brd_no, pring->ringno, portRspPut,
1488 pring->ringno, portRspPut, portRspMax); 1486 portRspMax);
1489 1487
1490 phba->link_state = LPFC_HBA_ERROR; 1488 phba->link_state = LPFC_HBA_ERROR;
1491 spin_unlock_irqrestore(&phba->hbalock, iflag); 1489 spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -1551,6 +1549,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1551 1549
1552 pring->stats.iocb_rsp++; 1550 pring->stats.iocb_rsp++;
1553 1551
1552 /*
1553 * If resource errors reported from HBA, reduce
1554 * queuedepths of the SCSI device.
1555 */
1556 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1557 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1558 spin_unlock_irqrestore(&phba->hbalock, iflag);
1559 lpfc_adjust_queue_depth(phba);
1560 spin_lock_irqsave(&phba->hbalock, iflag);
1561 }
1562
1554 if (irsp->ulpStatus) { 1563 if (irsp->ulpStatus) {
1555 /* Rsp ring <ringno> error: IOCB */ 1564 /* Rsp ring <ringno> error: IOCB */
1556 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1565 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -1634,16 +1643,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1634 phba->brd_no, adaptermsg); 1643 phba->brd_no, adaptermsg);
1635 } else { 1644 } else {
1636 /* Unknown IOCB command */ 1645 /* Unknown IOCB command */
1637 lpfc_printf_log(phba, 1646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1638 KERN_ERR, 1647 "%d:0335 Unknown IOCB "
1639 LOG_SLI, 1648 "command Data: x%x "
1640 "%d:0335 Unknown IOCB command " 1649 "x%x x%x x%x\n",
1641 "Data: x%x x%x x%x x%x\n", 1650 phba->brd_no,
1642 phba->brd_no, 1651 irsp->ulpCommand,
1643 irsp->ulpCommand, 1652 irsp->ulpStatus,
1644 irsp->ulpStatus, 1653 irsp->ulpIoTag,
1645 irsp->ulpIoTag, 1654 irsp->ulpContext);
1646 irsp->ulpContext);
1647 } 1655 }
1648 } 1656 }
1649 1657
@@ -1656,6 +1664,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1656 } 1664 }
1657 __lpfc_sli_release_iocbq(phba, saveq); 1665 __lpfc_sli_release_iocbq(phba, saveq);
1658 } 1666 }
1667 rspiocbp = NULL;
1659 } 1668 }
1660 1669
1661 /* 1670 /*
@@ -1668,7 +1677,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1668 } 1677 }
1669 } /* while (pring->rspidx != portRspPut) */ 1678 } /* while (pring->rspidx != portRspPut) */
1670 1679
1671 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) { 1680 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1672 /* At least one response entry has been freed */ 1681 /* At least one response entry has been freed */
1673 pring->stats.iocb_rsp_full++; 1682 pring->stats.iocb_rsp_full++;
1674 /* SET RxRE_RSP in Chip Att register */ 1683 /* SET RxRE_RSP in Chip Att register */
@@ -1700,6 +1709,10 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1700 struct lpfc_iocbq *iocb, *next_iocb; 1709 struct lpfc_iocbq *iocb, *next_iocb;
1701 IOCB_t *cmd = NULL; 1710 IOCB_t *cmd = NULL;
1702 1711
1712 if (pring->ringno == LPFC_ELS_RING) {
1713 lpfc_fabric_abort_hba(phba);
1714 }
1715
1703 /* Error everything on txq and txcmplq 1716 /* Error everything on txq and txcmplq
1704 * First do the txq. 1717 * First do the txq.
1705 */ 1718 */
@@ -1716,7 +1729,7 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1716 while (!list_empty(&completions)) { 1729 while (!list_empty(&completions)) {
1717 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1730 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1718 cmd = &iocb->iocb; 1731 cmd = &iocb->iocb;
1719 list_del(&iocb->list); 1732 list_del_init(&iocb->list);
1720 1733
1721 if (!iocb->iocb_cmpl) 1734 if (!iocb->iocb_cmpl)
1722 lpfc_sli_release_iocbq(phba, iocb); 1735 lpfc_sli_release_iocbq(phba, iocb);
@@ -1757,7 +1770,7 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1757 1770
1758 if (i == 15) { 1771 if (i == 15) {
1759 /* Do post */ 1772 /* Do post */
1760 phba->pport->port_state = LPFC_STATE_UNKNOWN; 1773 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1761 lpfc_sli_brdrestart(phba); 1774 lpfc_sli_brdrestart(phba);
1762 } 1775 }
1763 /* Read the HBA Host Status Register */ 1776 /* Read the HBA Host Status Register */
@@ -1862,8 +1875,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
1862 1875
1863 /* Kill HBA */ 1876 /* Kill HBA */
1864 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1877 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1865 "%d:0329 Kill HBA Data: x%x x%x\n", 1878 "%d:0329 Kill HBA Data: x%x x%x\n",
1866 phba->brd_no, phba->pport->port_state, psli->sli_flag); 1879 phba->brd_no, phba->pport->port_state, psli->sli_flag);
1867 1880
1868 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1881 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1869 GFP_KERNEL)) == 0) 1882 GFP_KERNEL)) == 0)
@@ -2087,7 +2100,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2087 2100
2088 if (i == 15) { 2101 if (i == 15) {
2089 /* Do post */ 2102 /* Do post */
2090 phba->pport->port_state = LPFC_STATE_UNKNOWN; 2103 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2091 lpfc_sli_brdrestart(phba); 2104 lpfc_sli_brdrestart(phba);
2092 } 2105 }
2093 /* Read the HBA Host Status Register */ 2106 /* Read the HBA Host Status Register */
@@ -2117,55 +2130,10 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
2117 return 0; 2130 return 0;
2118} 2131}
2119 2132
2120static struct hbq_dmabuf *
2121lpfc_alloc_hbq_buffers(struct lpfc_hba *phba, int count)
2122{
2123 struct hbq_dmabuf *hbq_buffer_pool;
2124 int i;
2125
2126 hbq_buffer_pool = kmalloc(count * sizeof(struct hbq_dmabuf),
2127 GFP_KERNEL);
2128 if (!hbq_buffer_pool)
2129 goto out;
2130
2131 for (i = 0; i < count; ++i) {
2132 hbq_buffer_pool[i].dbuf.virt =
2133 lpfc_hbq_alloc(phba, MEM_PRI,
2134 &hbq_buffer_pool[i].dbuf.phys);
2135 if (hbq_buffer_pool[i].dbuf.virt == NULL)
2136 goto alloc_failed;
2137 hbq_buffer_pool[i].tag = i;
2138 }
2139 goto out;
2140
2141alloc_failed:
2142 while (--i >= 0)
2143 lpfc_hbq_free(phba, hbq_buffer_pool[i].dbuf.virt,
2144 hbq_buffer_pool[i].dbuf.phys);
2145 kfree(hbq_buffer_pool);
2146 hbq_buffer_pool = NULL;
2147
2148out:
2149 phba->hbq_buffer_pool = hbq_buffer_pool;
2150 return hbq_buffer_pool;
2151}
2152
2153static struct lpfc_hbq_init lpfc_els_hbq = {
2154 .rn = 1,
2155 .entry_count = 1200,
2156 .mask_count = 0,
2157 .profile = 0,
2158 .ring_mask = 1 << LPFC_ELS_RING,
2159};
2160
2161static struct lpfc_hbq_init *lpfc_hbq_definitions[] = {
2162 &lpfc_els_hbq,
2163};
2164
2165static int 2133static int
2166lpfc_sli_hbq_count(void) 2134lpfc_sli_hbq_count(void)
2167{ 2135{
2168 return ARRAY_SIZE(lpfc_hbq_definitions); 2136 return ARRAY_SIZE(lpfc_hbq_defs);
2169} 2137}
2170 2138
2171static int 2139static int
@@ -2176,7 +2144,7 @@ lpfc_sli_hbq_entry_count(void)
2176 int i; 2144 int i;
2177 2145
2178 for (i = 0; i < hbq_count; ++i) 2146 for (i = 0; i < hbq_count; ++i)
2179 count += lpfc_hbq_definitions[i]->entry_count; 2147 count += lpfc_hbq_defs[i]->entry_count;
2180 return count; 2148 return count;
2181} 2149}
2182 2150
@@ -2194,18 +2162,10 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2194 MAILBOX_t *pmbox; 2162 MAILBOX_t *pmbox;
2195 uint32_t hbqno; 2163 uint32_t hbqno;
2196 uint32_t hbq_entry_index; 2164 uint32_t hbq_entry_index;
2197 uint32_t hbq_buffer_count;
2198
2199 /* count hbq buffers */
2200 hbq_buffer_count = lpfc_sli_hbq_entry_count();
2201 if (!lpfc_alloc_hbq_buffers(phba, hbq_buffer_count))
2202 return -ENOMEM;
2203 2165
2204 phba->hbq_buffer_count = hbq_buffer_count; 2166 /* Get a Mailbox buffer to setup mailbox
2205 2167 * commands for HBA initialization
2206 /* Get a Mailbox buffer to setup mailbox 2168 */
2207 * commands for HBA initialization
2208 */
2209 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2169 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2210 2170
2211 if (!pmb) 2171 if (!pmb)
@@ -2222,9 +2182,9 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2222 phba->hbqs[hbqno].hbqPutIdx = 0; 2182 phba->hbqs[hbqno].hbqPutIdx = 0;
2223 phba->hbqs[hbqno].local_hbqGetIdx = 0; 2183 phba->hbqs[hbqno].local_hbqGetIdx = 0;
2224 phba->hbqs[hbqno].entry_count = 2184 phba->hbqs[hbqno].entry_count =
2225 lpfc_hbq_definitions[hbqno]->entry_count; 2185 lpfc_hbq_defs[hbqno]->entry_count;
2226 lpfc_config_hbq(phba, lpfc_hbq_definitions[hbqno], 2186 lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
2227 hbq_entry_index, pmb); 2187 pmb);
2228 hbq_entry_index += phba->hbqs[hbqno].entry_count; 2188 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2229 2189
2230 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 2190 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
@@ -2232,7 +2192,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2232 mbxStatus <status>, ring <num> */ 2192 mbxStatus <status>, ring <num> */
2233 2193
2234 lpfc_printf_log(phba, KERN_ERR, 2194 lpfc_printf_log(phba, KERN_ERR,
2235 LOG_SLI, 2195 LOG_SLI | LOG_VPORT,
2236 "%d:1805 Adapter failed to init. " 2196 "%d:1805 Adapter failed to init. "
2237 "Data: x%x x%x x%x\n", 2197 "Data: x%x x%x x%x\n",
2238 phba->brd_no, pmbox->mbxCommand, 2198 phba->brd_no, pmbox->mbxCommand,
@@ -2240,17 +2200,18 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2240 2200
2241 phba->link_state = LPFC_HBA_ERROR; 2201 phba->link_state = LPFC_HBA_ERROR;
2242 mempool_free(pmb, phba->mbox_mem_pool); 2202 mempool_free(pmb, phba->mbox_mem_pool);
2243 /* Free all HBQ memory */
2244 lpfc_sli_hbqbuf_free_all(phba);
2245 return ENXIO; 2203 return ENXIO;
2246 } 2204 }
2247 } 2205 }
2248 phba->hbq_count = hbq_count; 2206 phba->hbq_count = hbq_count;
2249 2207
2250 /* Initially populate or replenish the HBQs */
2251 lpfc_sli_hbqbuf_fill_hbqs(phba);
2252 mempool_free(pmb, phba->mbox_mem_pool); 2208 mempool_free(pmb, phba->mbox_mem_pool);
2253 2209
2210 /* Initially populate or replenish the HBQs */
2211 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2212 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2213 return -ENOMEM;
2214 }
2254 return 0; 2215 return 0;
2255} 2216}
2256 2217
@@ -2271,7 +2232,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2271 spin_lock_irq(&phba->hbalock); 2232 spin_lock_irq(&phba->hbalock);
2272 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2233 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2273 spin_unlock_irq(&phba->hbalock); 2234 spin_unlock_irq(&phba->hbalock);
2274 phba->pport->port_state = LPFC_STATE_UNKNOWN; 2235 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2275 lpfc_sli_brdrestart(phba); 2236 lpfc_sli_brdrestart(phba);
2276 msleep(2500); 2237 msleep(2500);
2277 rc = lpfc_sli_chipset_init(phba); 2238 rc = lpfc_sli_chipset_init(phba);
@@ -2301,20 +2262,20 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2301 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2262 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2302 if (rc != MBX_SUCCESS) { 2263 if (rc != MBX_SUCCESS) {
2303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2264 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2304 "%d:0442 Adapter failed to init, " 2265 "%d:0442 Adapter failed to init, mbxCmd x%x "
2305 "mbxCmd x%x CONFIG_PORT, mbxStatus " 2266 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2306 "x%x Data: x%x\n", 2267 phba->brd_no, pmb->mb.mbxCommand,
2307 phba->brd_no, pmb->mb.mbxCommand, 2268 pmb->mb.mbxStatus, 0);
2308 pmb->mb.mbxStatus, 0);
2309 spin_lock_irq(&phba->hbalock); 2269 spin_lock_irq(&phba->hbalock);
2310 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 2270 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2311 spin_unlock_irq(&phba->hbalock); 2271 spin_unlock_irq(&phba->hbalock);
2312 rc = -ENXIO; 2272 rc = -ENXIO;
2313 } else { 2273 } else {
2314 done = 1; 2274 done = 1;
2315 /* DBG: Do we need max_vpi, reg_vpi for that matter 2275 phba->max_vpi = (phba->max_vpi &&
2316 phba->max_vpi = 0; 2276 pmb->mb.un.varCfgPort.gmv) != 0
2317 */ 2277 ? pmb->mb.un.varCfgPort.max_vpi
2278 : 0;
2318 } 2279 }
2319 } 2280 }
2320 2281
@@ -2324,13 +2285,13 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2324 } 2285 }
2325 2286
2326 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 2287 if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2327 (!pmb->mb.un.varCfgPort.cMA)) { 2288 (!pmb->mb.un.varCfgPort.cMA)) {
2328 rc = -ENXIO; 2289 rc = -ENXIO;
2329 goto do_prep_failed; 2290 goto do_prep_failed;
2330 } 2291 }
2331 return rc; 2292 return rc;
2332 2293
2333 do_prep_failed: 2294do_prep_failed:
2334 mempool_free(pmb, phba->mbox_mem_pool); 2295 mempool_free(pmb, phba->mbox_mem_pool);
2335 return rc; 2296 return rc;
2336} 2297}
@@ -2339,17 +2300,24 @@ int
2339lpfc_sli_hba_setup(struct lpfc_hba *phba) 2300lpfc_sli_hba_setup(struct lpfc_hba *phba)
2340{ 2301{
2341 uint32_t rc; 2302 uint32_t rc;
2342 int mode = 3; 2303 int mode = 3;
2343 2304
2344 switch (lpfc_sli_mode) { 2305 switch (lpfc_sli_mode) {
2345 case 2: 2306 case 2:
2307 if (lpfc_npiv_enable) {
2308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2309 "%d:1824 NPIV enabled: Override lpfc_sli_mode "
2310 "parameter (%d) to auto (0).\n",
2311 phba->brd_no, lpfc_sli_mode);
2312 break;
2313 }
2346 mode = 2; 2314 mode = 2;
2347 break; 2315 break;
2348 case 0: 2316 case 0:
2349 case 3: 2317 case 3:
2350 break; 2318 break;
2351 default: 2319 default:
2352 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2320 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2353 "%d:1819 Unrecognized lpfc_sli_mode " 2321 "%d:1819 Unrecognized lpfc_sli_mode "
2354 "parameter: %d.\n", 2322 "parameter: %d.\n",
2355 phba->brd_no, lpfc_sli_mode); 2323 phba->brd_no, lpfc_sli_mode);
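[Editor's note] The switch above rejects a forced lpfc_sli_mode of 2 when NPIV is requested, since NPIV needs SLI-3, and leaves the default (auto/SLI-3 negotiation via CONFIG_PORT) in place. A toy version of that parameter-reconciliation step is sketched below; the two parameter names come from the hunk, while the surrounding function is invented and the handling of the unrecognized case is not shown by this hunk.

#include <stdio.h>

/* Module-parameter stand-ins; real values come from lpfc_attr.c. */
static int lpfc_sli_mode = 2;
static int lpfc_npiv_enable = 1;

/* Pick the SLI mode to request from CONFIG_PORT.  NPIV is only
 * available in SLI-3, so a forced SLI-2 setting is overridden. */
static int pick_sli_mode(void)
{
	switch (lpfc_sli_mode) {
	case 2:
		if (lpfc_npiv_enable) {
			fprintf(stderr,
				"NPIV enabled: overriding lpfc_sli_mode (%d) to auto\n",
				lpfc_sli_mode);
			return 3;	/* keep the default, let CONFIG_PORT negotiate */
		}
		return 2;
	case 0:
	case 3:
		return 3;
	default:
		/* The driver's real handling of bad values lies outside
		 * this hunk; fall back to auto here for the sketch. */
		fprintf(stderr, "unrecognized lpfc_sli_mode %d, using auto\n",
			lpfc_sli_mode);
		return 3;
	}
}

int main(void)
{
	printf("requested SLI mode: %d\n", pick_sli_mode());
	return 0;
}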
@@ -2359,7 +2327,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2359 2327
2360 rc = lpfc_do_config_port(phba, mode); 2328 rc = lpfc_do_config_port(phba, mode);
2361 if (rc && lpfc_sli_mode == 3) 2329 if (rc && lpfc_sli_mode == 3)
2362 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2363 "%d:1820 Unable to select SLI-3. " 2331 "%d:1820 Unable to select SLI-3. "
2364 "Not supported by adapter.\n", 2332 "Not supported by adapter.\n",
2365 phba->brd_no); 2333 phba->brd_no);
@@ -2377,18 +2345,18 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2377 } else { 2345 } else {
2378 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 2346 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2379 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 2347 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2380 phba->sli3_options = 0x0; 2348 phba->sli3_options = 0;
2381 } 2349 }
2382 2350
2383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2351 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2384 "%d:0444 Firmware in SLI %x mode.\n", 2352 "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
2385 phba->brd_no, phba->sli_rev); 2353 phba->brd_no, phba->sli_rev, phba->max_vpi);
2386 rc = lpfc_sli_ring_map(phba); 2354 rc = lpfc_sli_ring_map(phba);
2387 2355
2388 if (rc) 2356 if (rc)
2389 goto lpfc_sli_hba_setup_error; 2357 goto lpfc_sli_hba_setup_error;
2390 2358
2391 /* Init HBQs */ 2359 /* Init HBQs */
2392 2360
2393 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2361 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2394 rc = lpfc_sli_hbq_setup(phba); 2362 rc = lpfc_sli_hbq_setup(phba);
@@ -2404,7 +2372,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2404 2372
2405 return rc; 2373 return rc;
2406 2374
2407 lpfc_sli_hba_setup_error: 2375lpfc_sli_hba_setup_error:
2408 phba->link_state = LPFC_HBA_ERROR; 2376 phba->link_state = LPFC_HBA_ERROR;
2409 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2377 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2410 "%d:0445 Firmware initialization failed\n", 2378 "%d:0445 Firmware initialization failed\n",
@@ -2428,19 +2396,21 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
2428void 2396void
2429lpfc_mbox_timeout(unsigned long ptr) 2397lpfc_mbox_timeout(unsigned long ptr)
2430{ 2398{
2431 struct lpfc_hba *phba = (struct lpfc_hba *) phba; 2399 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2432 unsigned long iflag; 2400 unsigned long iflag;
2433 uint32_t tmo_posted; 2401 uint32_t tmo_posted;
2434 2402
2435 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 2403 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2436 tmo_posted = (phba->pport->work_port_events & WORKER_MBOX_TMO); 2404 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2437 if (!tmo_posted) 2405 if (!tmo_posted)
2438 phba->pport->work_port_events |= WORKER_MBOX_TMO; 2406 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2439 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 2407 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2440 2408
2441 if (!tmo_posted) { 2409 if (!tmo_posted) {
2410 spin_lock_irqsave(&phba->hbalock, iflag);
2442 if (phba->work_wait) 2411 if (phba->work_wait)
2443 wake_up(phba->work_wait); 2412 lpfc_worker_wake_up(phba);
2413 spin_unlock_irqrestore(&phba->hbalock, iflag);
2444 } 2414 }
2445} 2415}
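[Editor's note] The first change in lpfc_mbox_timeout() above is a genuine bug fix: the old code cast the still-uninitialized local (`(struct lpfc_hba *) phba`) instead of the timer argument `ptr`. The tiny userspace illustration below shows why the original form is wrong; the callback and struct names are made up.

#include <stdio.h>

struct hba {
	int brd_no;
};

/* Timer-style callback: the subsystem hands back an opaque value that
 * was registered together with the timer. */
static void mbox_timeout(unsigned long ptr)
{
	/* Correct: recover the object from the opaque argument. */
	struct hba *phba = (struct hba *) ptr;

	/* The buggy form was:
	 *     struct hba *phba = (struct hba *) phba;
	 * which "initializes" phba from its own indeterminate value and
	 * then dereferences garbage; gcc flags it with -Winit-self.
	 */

	printf("mailbox timeout on board %d\n", phba->brd_no);
}

int main(void)
{
	struct hba adapter = { .brd_no = 0 };

	/* A real timer core would stash this value and call the handler
	 * later; here we just invoke it directly. */
	mbox_timeout((unsigned long) &adapter);
	return 0;
}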
2446 2416
@@ -2458,12 +2428,13 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2458 2428
2459 /* Mbox cmd <mbxCommand> timeout */ 2429 /* Mbox cmd <mbxCommand> timeout */
2460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2430 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2461 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 2431 "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
2462 phba->brd_no, 2432 "x%p\n",
2463 mb->mbxCommand, 2433 phba->brd_no,
2464 phba->pport->port_state, 2434 mb->mbxCommand,
2465 phba->sli.sli_flag, 2435 phba->pport->port_state,
2466 phba->sli.mbox_active); 2436 phba->sli.sli_flag,
2437 phba->sli.mbox_active);
2467 2438
2468 /* Setting state unknown so lpfc_sli_abort_iocb_ring 2439 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2469 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 2440 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
@@ -2510,10 +2481,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2510 void __iomem *to_slim; 2481 void __iomem *to_slim;
2511 2482
2512 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 2483 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2513 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 2484 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2514 if(!pmbox->vport) { 2485 if(!pmbox->vport) {
2515 lpfc_printf_log(phba, KERN_ERR, 2486 lpfc_printf_log(phba, KERN_ERR,
2516 LOG_MBOX, 2487 LOG_MBOX | LOG_VPORT,
2517 "%d:1806 Mbox x%x failed. No vport\n", 2488 "%d:1806 Mbox x%x failed. No vport\n",
2518 phba->brd_no, 2489 phba->brd_no,
2519 pmbox->mb.mbxCommand); 2490 pmbox->mb.mbxCommand);
@@ -2522,12 +2493,15 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2522 } 2493 }
2523 } 2494 }
2524 2495
2496
2525 /* If the PCI channel is in offline state, do not post mbox. */ 2497 /* If the PCI channel is in offline state, do not post mbox. */
2526 if (unlikely(pci_channel_offline(phba->pcidev))) 2498 if (unlikely(pci_channel_offline(phba->pcidev)))
2527 return MBX_NOT_FINISHED; 2499 return MBX_NOT_FINISHED;
2528 2500
2529 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2501 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2530 psli = &phba->sli; 2502 psli = &phba->sli;
2503
2504
2531 mb = &pmbox->mb; 2505 mb = &pmbox->mb;
2532 status = MBX_SUCCESS; 2506 status = MBX_SUCCESS;
2533 2507
@@ -2535,14 +2509,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2535 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2509 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2536 2510
2537 /* Mbox command <mbxCommand> cannot issue */ 2511 /* Mbox command <mbxCommand> cannot issue */
2538 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2512 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2539 return MBX_NOT_FINISHED; 2513 return MBX_NOT_FINISHED;
2540 } 2514 }
2541 2515
2542 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2516 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2543 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2517 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2544 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2518 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2545 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2519 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2546 return MBX_NOT_FINISHED; 2520 return MBX_NOT_FINISHED;
2547 } 2521 }
2548 2522
@@ -2556,14 +2530,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2556 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2530 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2557 2531
2558 /* Mbox command <mbxCommand> cannot issue */ 2532 /* Mbox command <mbxCommand> cannot issue */
2559 LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag); 2533 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2560 return MBX_NOT_FINISHED; 2534 return MBX_NOT_FINISHED;
2561 } 2535 }
2562 2536
2563 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2537 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2564 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2538 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2565 /* Mbox command <mbxCommand> cannot issue */ 2539 /* Mbox command <mbxCommand> cannot issue */
2566 LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag); 2540 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2567 return MBX_NOT_FINISHED; 2541 return MBX_NOT_FINISHED;
2568 } 2542 }
2569 2543
@@ -2589,10 +2563,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2589 2563
2590 /* Mbox cmd issue - BUSY */ 2564 /* Mbox cmd issue - BUSY */
2591 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2565 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2592 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", 2566 "%d (%d):0308 Mbox cmd issue - BUSY Data: "
2593 phba->brd_no, 2567 "x%x x%x x%x x%x\n",
2594 mb->mbxCommand, phba->pport->port_state, 2568 phba->brd_no,
2595 psli->sli_flag, flag); 2569 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2570 mb->mbxCommand, phba->pport->port_state,
2571 psli->sli_flag, flag);
2596 2572
2597 psli->slistat.mbox_busy++; 2573 psli->slistat.mbox_busy++;
2598 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2574 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -2626,7 +2602,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2626 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2602 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2627 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2603 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2628 /* Mbox command <mbxCommand> cannot issue */ 2604 /* Mbox command <mbxCommand> cannot issue */
2629 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); 2605 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2630 return MBX_NOT_FINISHED; 2606 return MBX_NOT_FINISHED;
2631 } 2607 }
2632 /* timeout active mbox command */ 2608 /* timeout active mbox command */
@@ -2636,10 +2612,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2636 2612
2637 /* Mailbox cmd <cmd> issue */ 2613 /* Mailbox cmd <cmd> issue */
2638 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2614 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2639 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", 2615 "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2640 phba->brd_no, 2616 "x%x\n",
2641 mb->mbxCommand, phba->pport->port_state, 2617 phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
2642 psli->sli_flag, flag); 2618 mb->mbxCommand, phba->pport->port_state,
2619 psli->sli_flag, flag);
2643 2620
2644 psli->slistat.mbox_cmd++; 2621 psli->slistat.mbox_cmd++;
2645 evtctr = psli->slistat.mbox_event; 2622 evtctr = psli->slistat.mbox_event;
@@ -2654,7 +2631,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2654 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2631 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2655 /* copy command data into host mbox for cmpl */ 2632 /* copy command data into host mbox for cmpl */
2656 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2633 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2657 MAILBOX_CMD_SIZE); 2634 MAILBOX_CMD_SIZE);
2658 } 2635 }
2659 2636
2660 /* First copy mbox command data to HBA SLIM, skip past first 2637 /* First copy mbox command data to HBA SLIM, skip past first
@@ -2756,14 +2733,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2756 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2733 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2757 /* copy results back to user */ 2734 /* copy results back to user */
2758 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2735 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2759 MAILBOX_CMD_SIZE); 2736 MAILBOX_CMD_SIZE);
2760 } else { 2737 } else {
2761 /* First copy command data */ 2738 /* First copy command data */
2762 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2739 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2763 MAILBOX_CMD_SIZE); 2740 MAILBOX_CMD_SIZE);
2764 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 2741 if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2765 pmbox->context2) { 2742 pmbox->context2) {
2766 lpfc_memcpy_from_slim((void *) pmbox->context2, 2743 lpfc_memcpy_from_slim((void *)pmbox->context2,
2767 phba->MBslimaddr + DMP_RSP_OFFSET, 2744 phba->MBslimaddr + DMP_RSP_OFFSET,
2768 mb->un.varDmp.word_cnt); 2745 mb->un.varDmp.word_cnt);
2769 } 2746 }
@@ -2780,17 +2757,16 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2780 return status; 2757 return status;
2781} 2758}
2782 2759
2760/*
2761 * Caller needs to hold lock.
2762 */
2783static int 2763static int
2784lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2764__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2785 struct lpfc_iocbq *piocb) 2765 struct lpfc_iocbq *piocb)
2786{ 2766{
2787 unsigned long iflags;
2788
2789 /* Insert the caller's iocb in the txq tail for later processing. */ 2767 /* Insert the caller's iocb in the txq tail for later processing. */
2790 spin_lock_irqsave(&phba->hbalock, iflags);
2791 list_add_tail(&piocb->list, &pring->txq); 2768 list_add_tail(&piocb->list, &pring->txq);
2792 pring->txq_cnt++; 2769 pring->txq_cnt++;
2793 spin_unlock_irqrestore(&phba->hbalock, iflags);
2794 return 0; 2770 return 0;
2795} 2771}
2796 2772
@@ -2809,14 +2785,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2809 return nextiocb; 2785 return nextiocb;
2810} 2786}
2811 2787
2788/*
2789 * Lockless version of lpfc_sli_issue_iocb.
2790 */
2812int 2791int
2813lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2792__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2814 struct lpfc_iocbq *piocb, uint32_t flag) 2793 struct lpfc_iocbq *piocb, uint32_t flag)
2815{ 2794{
2816 struct lpfc_iocbq *nextiocb; 2795 struct lpfc_iocbq *nextiocb;
2817 unsigned long iflags;
2818 IOCB_t *iocb; 2796 IOCB_t *iocb;
2819 2797
2798 if (piocb->iocb_cmpl && (!piocb->vport) &&
2799 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2800 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2801 lpfc_printf_log(phba, KERN_ERR,
2802 LOG_SLI | LOG_VPORT,
2803 "%d:1807 IOCB x%x failed. No vport\n",
2804 phba->brd_no,
2805 piocb->iocb.ulpCommand);
2806 dump_stack();
2807 return IOCB_ERROR;
2808 }
2809
2810
2820 /* If the PCI channel is in offline state, do not post iocbs. */ 2811 /* If the PCI channel is in offline state, do not post iocbs. */
2821 if (unlikely(pci_channel_offline(phba->pcidev))) 2812 if (unlikely(pci_channel_offline(phba->pcidev)))
2822 return IOCB_ERROR; 2813 return IOCB_ERROR;
@@ -2862,10 +2853,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2862 * attention events. 2853 * attention events.
2863 */ 2854 */
2864 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 2855 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2865 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) 2856 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2866 goto iocb_busy; 2857 goto iocb_busy;
2858 }
2867 2859
2868 spin_lock_irqsave(&phba->hbalock, iflags);
2869 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2860 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2870 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 2861 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2871 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 2862 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
@@ -2874,7 +2865,6 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2874 lpfc_sli_update_ring(phba, pring); 2865 lpfc_sli_update_ring(phba, pring);
2875 else 2866 else
2876 lpfc_sli_update_full_ring(phba, pring); 2867 lpfc_sli_update_full_ring(phba, pring);
2877 spin_unlock_irqrestore(&phba->hbalock, iflags);
2878 2868
2879 if (!piocb) 2869 if (!piocb)
2880 return IOCB_SUCCESS; 2870 return IOCB_SUCCESS;
@@ -2882,20 +2872,33 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2882 goto out_busy; 2872 goto out_busy;
2883 2873
2884 iocb_busy: 2874 iocb_busy:
2885 spin_lock_irqsave(&phba->hbalock, iflags);
2886 pring->stats.iocb_cmd_delay++; 2875 pring->stats.iocb_cmd_delay++;
2887 spin_unlock_irqrestore(&phba->hbalock, iflags);
2888 2876
2889 out_busy: 2877 out_busy:
2890 2878
2891 if (!(flag & SLI_IOCB_RET_IOCB)) { 2879 if (!(flag & SLI_IOCB_RET_IOCB)) {
2892 lpfc_sli_ringtx_put(phba, pring, piocb); 2880 __lpfc_sli_ringtx_put(phba, pring, piocb);
2893 return IOCB_SUCCESS; 2881 return IOCB_SUCCESS;
2894 } 2882 }
2895 2883
2896 return IOCB_BUSY; 2884 return IOCB_BUSY;
2897} 2885}
2898 2886
2887
2888int
2889lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2890 struct lpfc_iocbq *piocb, uint32_t flag)
2891{
2892 unsigned long iflags;
2893 int rc;
2894
2895 spin_lock_irqsave(&phba->hbalock, iflags);
2896 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2897 spin_unlock_irqrestore(&phba->hbalock, iflags);
2898
2899 return rc;
2900}
2901
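[Editor's note] Several routines in this patch are split into a double-underscore lockless body (caller already holds hbalock) and a thin wrapper that takes the lock, so paths that are already under hbalock - the abort path, parts of the interrupt handler - can call the core directly. A minimal pthread sketch of that convention; the names are illustrative, not driver APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static int txq_cnt;

/* Lockless core: caller must already hold hba_lock
 * (the double-underscore prefix mirrors the driver's convention). */
static int __ringtx_put(int iocb)
{
	(void)iocb;
	txq_cnt++;		/* queue bookkeeping under the lock */
	return 0;
}

/* Locking wrapper for callers that do not hold the lock yet. */
static int ringtx_put(int iocb)
{
	int rc;

	pthread_mutex_lock(&hba_lock);
	rc = __ringtx_put(iocb);
	pthread_mutex_unlock(&hba_lock);
	return rc;
}

int main(void)
{
	ringtx_put(1);			/* ordinary caller              */

	pthread_mutex_lock(&hba_lock);	/* e.g. a path already locked   */
	__ringtx_put(2);		/* use the core, no double lock */
	pthread_mutex_unlock(&hba_lock);

	printf("txq_cnt = %d\n", txq_cnt);
	return 0;
}

The split avoids self-deadlock on hbalock without duplicating the queueing logic.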
2899static int 2902static int
2900lpfc_extra_ring_setup( struct lpfc_hba *phba) 2903lpfc_extra_ring_setup( struct lpfc_hba *phba)
2901{ 2904{
@@ -2960,14 +2963,14 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2960 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 2963 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2961 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 2964 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2962 pring->sizeCiocb = (phba->sli_rev == 3) ? 2965 pring->sizeCiocb = (phba->sli_rev == 3) ?
2963 SLI3_IOCB_CMD_SIZE : 2966 SLI3_IOCB_CMD_SIZE :
2964 SLI2_IOCB_CMD_SIZE; 2967 SLI2_IOCB_CMD_SIZE;
2965 pring->sizeRiocb = (phba->sli_rev == 3) ? 2968 pring->sizeRiocb = (phba->sli_rev == 3) ?
2966 SLI3_IOCB_RSP_SIZE : 2969 SLI3_IOCB_RSP_SIZE :
2967 SLI2_IOCB_RSP_SIZE; 2970 SLI2_IOCB_RSP_SIZE;
2968 pring->iotag_ctr = 0; 2971 pring->iotag_ctr = 0;
2969 pring->iotag_max = 2972 pring->iotag_max =
2970 (phba->cfg_hba_queue_depth * 2); 2973 (phba->cfg_hba_queue_depth * 2);
2971 pring->fast_iotag = pring->iotag_max; 2974 pring->fast_iotag = pring->iotag_max;
2972 pring->num_mask = 0; 2975 pring->num_mask = 0;
2973 break; 2976 break;
@@ -2976,11 +2979,11 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2976 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 2979 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2977 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 2980 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2978 pring->sizeCiocb = (phba->sli_rev == 3) ? 2981 pring->sizeCiocb = (phba->sli_rev == 3) ?
2979 SLI3_IOCB_CMD_SIZE : 2982 SLI3_IOCB_CMD_SIZE :
2980 SLI2_IOCB_CMD_SIZE; 2983 SLI2_IOCB_CMD_SIZE;
2981 pring->sizeRiocb = (phba->sli_rev == 3) ? 2984 pring->sizeRiocb = (phba->sli_rev == 3) ?
2982 SLI3_IOCB_RSP_SIZE : 2985 SLI3_IOCB_RSP_SIZE :
2983 SLI2_IOCB_RSP_SIZE; 2986 SLI2_IOCB_RSP_SIZE;
2984 pring->iotag_max = phba->cfg_hba_queue_depth; 2987 pring->iotag_max = phba->cfg_hba_queue_depth;
2985 pring->num_mask = 0; 2988 pring->num_mask = 0;
2986 break; 2989 break;
@@ -2989,11 +2992,11 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2989 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 2992 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2990 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 2993 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2991 pring->sizeCiocb = (phba->sli_rev == 3) ? 2994 pring->sizeCiocb = (phba->sli_rev == 3) ?
2992 SLI3_IOCB_CMD_SIZE : 2995 SLI3_IOCB_CMD_SIZE :
2993 SLI2_IOCB_CMD_SIZE; 2996 SLI2_IOCB_CMD_SIZE;
2994 pring->sizeRiocb = (phba->sli_rev == 3) ? 2997 pring->sizeRiocb = (phba->sli_rev == 3) ?
2995 SLI3_IOCB_RSP_SIZE : 2998 SLI3_IOCB_RSP_SIZE :
2996 SLI2_IOCB_RSP_SIZE; 2999 SLI2_IOCB_RSP_SIZE;
2997 pring->fast_iotag = 0; 3000 pring->fast_iotag = 0;
2998 pring->iotag_ctr = 0; 3001 pring->iotag_ctr = 0;
2999 pring->iotag_max = 4096; 3002 pring->iotag_max = 4096;
@@ -3002,30 +3005,30 @@ lpfc_sli_setup(struct lpfc_hba *phba)
3002 pring->prt[0].rctl = FC_ELS_REQ; 3005 pring->prt[0].rctl = FC_ELS_REQ;
3003 pring->prt[0].type = FC_ELS_DATA; 3006 pring->prt[0].type = FC_ELS_DATA;
3004 pring->prt[0].lpfc_sli_rcv_unsol_event = 3007 pring->prt[0].lpfc_sli_rcv_unsol_event =
3005 lpfc_els_unsol_event; 3008 lpfc_els_unsol_event;
3006 pring->prt[1].profile = 0; /* Mask 1 */ 3009 pring->prt[1].profile = 0; /* Mask 1 */
3007 pring->prt[1].rctl = FC_ELS_RSP; 3010 pring->prt[1].rctl = FC_ELS_RSP;
3008 pring->prt[1].type = FC_ELS_DATA; 3011 pring->prt[1].type = FC_ELS_DATA;
3009 pring->prt[1].lpfc_sli_rcv_unsol_event = 3012 pring->prt[1].lpfc_sli_rcv_unsol_event =
3010 lpfc_els_unsol_event; 3013 lpfc_els_unsol_event;
3011 pring->prt[2].profile = 0; /* Mask 2 */ 3014 pring->prt[2].profile = 0; /* Mask 2 */
3012 /* NameServer Inquiry */ 3015 /* NameServer Inquiry */
3013 pring->prt[2].rctl = FC_UNSOL_CTL; 3016 pring->prt[2].rctl = FC_UNSOL_CTL;
3014 /* NameServer */ 3017 /* NameServer */
3015 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 3018 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3016 pring->prt[2].lpfc_sli_rcv_unsol_event = 3019 pring->prt[2].lpfc_sli_rcv_unsol_event =
3017 lpfc_ct_unsol_event; 3020 lpfc_ct_unsol_event;
3018 pring->prt[3].profile = 0; /* Mask 3 */ 3021 pring->prt[3].profile = 0; /* Mask 3 */
3019 /* NameServer response */ 3022 /* NameServer response */
3020 pring->prt[3].rctl = FC_SOL_CTL; 3023 pring->prt[3].rctl = FC_SOL_CTL;
3021 /* NameServer */ 3024 /* NameServer */
3022 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 3025 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3023 pring->prt[3].lpfc_sli_rcv_unsol_event = 3026 pring->prt[3].lpfc_sli_rcv_unsol_event =
3024 lpfc_ct_unsol_event; 3027 lpfc_ct_unsol_event;
3025 break; 3028 break;
3026 } 3029 }
3027 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 3030 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3028 (pring->numRiocb * pring->sizeRiocb); 3031 (pring->numRiocb * pring->sizeRiocb);
3029 } 3032 }
3030 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 3033 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3031 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 3034 /* Too many cmd / rsp ring entries in SLI2 SLIM */
@@ -3051,6 +3054,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
3051 psli = &phba->sli; 3054 psli = &phba->sli;
3052 spin_lock_irq(&phba->hbalock); 3055 spin_lock_irq(&phba->hbalock);
3053 INIT_LIST_HEAD(&psli->mboxq); 3056 INIT_LIST_HEAD(&psli->mboxq);
3057 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3054 /* Initialize list headers for txq and txcmplq as double linked lists */ 3058 /* Initialize list headers for txq and txcmplq as double linked lists */
3055 for (i = 0; i < psli->num_rings; i++) { 3059 for (i = 0; i < psli->num_rings; i++) {
3056 pring = &psli->ring[i]; 3060 pring = &psli->ring[i];
@@ -3068,6 +3072,64 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
3068} 3072}
3069 3073
3070int 3074int
3075lpfc_sli_host_down(struct lpfc_vport *vport)
3076{
3077 struct lpfc_hba *phba = vport->phba;
3078 struct lpfc_sli *psli = &phba->sli;
3079 struct lpfc_sli_ring *pring;
3080 struct lpfc_iocbq *iocb, *next_iocb;
3081 IOCB_t *icmd = NULL;
3082 int i;
3083 unsigned long flags = 0;
3084 uint16_t prev_pring_flag;
3085
3086 lpfc_cleanup_discovery_resources(vport);
3087
3088 spin_lock_irqsave(&phba->hbalock, flags);
3089
3090 for (i = 0; i < psli->num_rings; i++) {
3091 pring = &psli->ring[i];
3092 prev_pring_flag = pring->flag;
3093 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3094
3095 /*
3096 * Error everything on the txq since these iocbs have not been
3097 * given to the FW yet.
3098 */
3099
3100 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3101 if (iocb->vport != vport)
3102 continue;
3103 list_del_init(&iocb->list);
3104 pring->txq_cnt--;
3105 if (iocb->iocb_cmpl) {
3106 icmd = &iocb->iocb;
3107 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3108 icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3109 spin_unlock_irqrestore(&phba->hbalock, flags);
3110 (iocb->iocb_cmpl) (phba, iocb, iocb);
3111 spin_lock_irqsave(&phba->hbalock, flags);
3112 } else
3113 lpfc_sli_release_iocbq(phba, iocb);
3114 }
3115
3116 /* Next issue ABTS for everything on the txcmplq */
3117 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3118 list) {
3119 if (iocb->vport != vport)
3120 continue;
3121 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3122 }
3123
3124 pring->flag = prev_pring_flag;
3125 }
3126
3127 spin_unlock_irqrestore(&phba->hbalock, flags);
3128
3129 return 1;
3130}
3131
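[Editor's note] lpfc_sli_host_down() is new with NPIV: rather than flushing every ring, it walks each ring and fails only the iocbs owned by the vport being torn down, completing the not-yet-issued ones locally (dropping hbalock around the callback) and issuing ABTS for the in-flight ones. A condensed userspace sketch of that "filter by owner, complete outside the lock" walk follows; the list is a plain array and every name is a simplified stand-in.

#include <pthread.h>
#include <stdio.h>

struct iocb {
	int vport_id;	/* which virtual port owns this command   */
	int issued;	/* 0 = still on txq, 1 = handed to the HW */
	int status;	/* completion status we report            */
};

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;

static void complete_iocb(struct iocb *io, int status)
{
	io->status = status;	/* would run the iocb_cmpl callback */
}

/* Tear down one vport: fail its queued commands, abort its active ones,
 * and leave every other vport's traffic untouched. */
static void host_down(struct iocb *q, int n, int vport_id)
{
	pthread_mutex_lock(&hba_lock);
	for (int i = 0; i < n; i++) {
		if (q[i].vport_id != vport_id)
			continue;		/* not ours: skip it */
		if (!q[i].issued) {
			/* Never given to the firmware: complete it locally
			 * with an SLI-down style error, dropping the lock
			 * around the callback as the driver does. */
			pthread_mutex_unlock(&hba_lock);
			complete_iocb(&q[i], -1);
			pthread_mutex_lock(&hba_lock);
		} else {
			/* In flight: an ABTS would be issued for it here. */
		}
	}
	pthread_mutex_unlock(&hba_lock);
}

int main(void)
{
	struct iocb q[] = {
		{ .vport_id = 1, .issued = 0 },
		{ .vport_id = 2, .issued = 0 },
		{ .vport_id = 1, .issued = 1 },
	};

	host_down(q, 3, 1);
	printf("vport 2 iocb untouched, status=%d\n", q[1].status);
	return 0;
}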
3132int
3071lpfc_sli_hba_down(struct lpfc_hba *phba) 3133lpfc_sli_hba_down(struct lpfc_hba *phba)
3072{ 3134{
3073 LIST_HEAD(completions); 3135 LIST_HEAD(completions);
@@ -3081,6 +3143,8 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3081 3143
3082 lpfc_hba_down_prep(phba); 3144 lpfc_hba_down_prep(phba);
3083 3145
3146 lpfc_fabric_abort_hba(phba);
3147
3084 spin_lock_irqsave(&phba->hbalock, flags); 3148 spin_lock_irqsave(&phba->hbalock, flags);
3085 for (i = 0; i < psli->num_rings; i++) { 3149 for (i = 0; i < psli->num_rings; i++) {
3086 pring = &psli->ring[i]; 3150 pring = &psli->ring[i];
@@ -3097,9 +3161,8 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3097 spin_unlock_irqrestore(&phba->hbalock, flags); 3161 spin_unlock_irqrestore(&phba->hbalock, flags);
3098 3162
3099 while (!list_empty(&completions)) { 3163 while (!list_empty(&completions)) {
3100 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 3164 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3101 cmd = &iocb->iocb; 3165 cmd = &iocb->iocb;
3102 list_del(&iocb->list);
3103 3166
3104 if (!iocb->iocb_cmpl) 3167 if (!iocb->iocb_cmpl)
3105 lpfc_sli_release_iocbq(phba, iocb); 3168 lpfc_sli_release_iocbq(phba, iocb);
@@ -3112,34 +3175,33 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
3112 3175
3113 /* Return any active mbox cmds */ 3176 /* Return any active mbox cmds */
3114 del_timer_sync(&psli->mbox_tmo); 3177 del_timer_sync(&psli->mbox_tmo);
3178 spin_lock_irqsave(&phba->hbalock, flags);
3115 3179
3116 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 3180 spin_lock(&phba->pport->work_port_lock);
3117 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 3181 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3118 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 3182 spin_unlock(&phba->pport->work_port_lock);
3119 3183
3120 spin_lock_irqsave(&phba->hbalock, flags); 3184 if (psli->mbox_active) {
3121 pmb = psli->mbox_active; 3185 list_add_tail(&psli->mbox_active->list, &completions);
3122 if (pmb) {
3123 psli->mbox_active = NULL; 3186 psli->mbox_active = NULL;
3124 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3125 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3187 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3126 if (pmb->mbox_cmpl) {
3127 pmb->mbox_cmpl(phba,pmb);
3128 }
3129 } 3188 }
3130 3189
3131 /* Return any pending mbox cmds */ 3190 /* Return any pending or completed mbox cmds */
3132 while ((pmb = lpfc_mbox_get(phba)) != NULL) { 3191 list_splice_init(&phba->sli.mboxq, &completions);
3192 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3193 INIT_LIST_HEAD(&psli->mboxq);
3194 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3195
3196 spin_unlock_irqrestore(&phba->hbalock, flags);
3197
3198 while (!list_empty(&completions)) {
3199 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3133 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3200 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3134 if (pmb->mbox_cmpl) { 3201 if (pmb->mbox_cmpl) {
3135 pmb->mbox_cmpl(phba,pmb); 3202 pmb->mbox_cmpl(phba,pmb);
3136 } 3203 }
3137 } 3204 }
3138 INIT_LIST_HEAD(&psli->mboxq);
3139
3140 /* Free all HBQ memory */
3141 lpfc_sli_hbqbuf_free_all(phba);
3142
3143 return 1; 3205 return 1;
3144} 3206}
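[Editor's note] In lpfc_sli_hba_down() the pending and completed mailbox queues (mboxq, the new mboxq_cmpl, and mbox_active) are now spliced onto a private completions list while hbalock is held, and each command is finished with MBX_NOT_FINISHED only after the lock is dropped. The sketch below shows that splice-under-lock / drain-outside-lock shape with a plain singly linked list instead of the kernel list_head machinery; names are illustrative.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct mbox {
	int status;
	struct mbox *next;
};

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mbox *mboxq;	/* commands waiting to be issued        */
static struct mbox *mboxq_cmpl;	/* commands waiting for their callback  */

/* Move an entire queue onto a private list and empty the original. */
static struct mbox *splice(struct mbox **q)
{
	struct mbox *head = *q;
	*q = NULL;
	return head;
}

static void hba_down(void)
{
	struct mbox *completions, *tail, *m;

	/* Grab everything while holding the lock... */
	pthread_mutex_lock(&hba_lock);
	completions = splice(&mboxq);
	for (tail = completions; tail && tail->next; tail = tail->next)
		;
	if (tail)
		tail->next = splice(&mboxq_cmpl);
	else
		completions = splice(&mboxq_cmpl);
	pthread_mutex_unlock(&hba_lock);

	/* ...then finish each command outside the lock. */
	for (m = completions; m; m = m->next)
		m->status = -1;		/* MBX_NOT_FINISHED in the driver */
}

int main(void)
{
	struct mbox a = { 0, NULL }, b = { 0, NULL };

	mboxq = &a;
	mboxq_cmpl = &b;
	hba_down();
	printf("a=%d b=%d\n", a.status, b.status);
	return 0;
}

Draining outside the lock matters because the completion handlers may themselves need hbalock or sleepable resources.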
3145 3207
@@ -3196,7 +3258,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3197 "%d:0410 Cannot find virtual addr for mapped buf on " 3259 "%d:0410 Cannot find virtual addr for mapped buf on "
3198 "ring %d Data x%llx x%p x%p x%x\n", 3260 "ring %d Data x%llx x%p x%p x%x\n",
3199 phba->brd_no, pring->ringno, (unsigned long long) phys, 3261 phba->brd_no, pring->ringno, (unsigned long long)phys,
3200 slp->next, slp->prev, pring->postbufq_cnt); 3262 slp->next, slp->prev, pring->postbufq_cnt);
3201 return NULL; 3263 return NULL;
3202} 3264}
@@ -3207,7 +3269,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3207{ 3269{
3208 IOCB_t *irsp = &rspiocb->iocb; 3270 IOCB_t *irsp = &rspiocb->iocb;
3209 uint16_t abort_iotag, abort_context; 3271 uint16_t abort_iotag, abort_context;
3210 struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb; 3272 struct lpfc_iocbq *abort_iocb;
3211 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3273 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3212 3274
3213 abort_iocb = NULL; 3275 abort_iocb = NULL;
@@ -3220,11 +3282,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3220 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 3282 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3221 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 3283 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3222 3284
3223 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3285 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3224 "%d:0327 Cannot abort els iocb %p" 3286 "%d:0327 Cannot abort els iocb %p "
3225 " with tag %x context %x\n", 3287 "with tag %x context %x, abort status %x, "
3226 phba->brd_no, abort_iocb, 3288 "abort code %x\n",
3227 abort_iotag, abort_context); 3289 phba->brd_no, abort_iocb, abort_iotag,
3290 abort_context, irsp->ulpStatus,
3291 irsp->un.ulpWord[4]);
3228 3292
3229 /* 3293 /*
3230 * make sure we have the right iocbq before taking it 3294 * make sure we have the right iocbq before taking it
@@ -3235,23 +3299,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3235 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 3299 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3236 spin_unlock_irq(&phba->hbalock); 3300 spin_unlock_irq(&phba->hbalock);
3237 else { 3301 else {
3238 list_del(&abort_iocb->list); 3302 list_del_init(&abort_iocb->list);
3239 pring->txcmplq_cnt--; 3303 pring->txcmplq_cnt--;
3240 spin_unlock_irq(&phba->hbalock); 3304 spin_unlock_irq(&phba->hbalock);
3241 3305
3242 rsp_ab_iocb = lpfc_sli_get_iocbq(phba); 3306 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3243 if (rsp_ab_iocb == NULL) 3307 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3244 lpfc_sli_release_iocbq(phba, abort_iocb); 3308 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3245 else { 3309 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3246 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3247 rsp_ab_iocb->iocb.ulpStatus =
3248 IOSTAT_LOCAL_REJECT;
3249 rsp_ab_iocb->iocb.un.ulpWord[4] =
3250 IOERR_SLI_ABORTED;
3251 (abort_iocb->iocb_cmpl)(phba, abort_iocb,
3252 rsp_ab_iocb);
3253 lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
3254 }
3255 } 3310 }
3256 } 3311 }
3257 3312
@@ -3259,6 +3314,23 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3259 return; 3314 return;
3260} 3315}
3261 3316
3317static void
3318lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3319 struct lpfc_iocbq *rspiocb)
3320{
3321 IOCB_t *irsp = &rspiocb->iocb;
3322
3323 /* ELS cmd tag <ulpIoTag> completes */
3324 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3325 "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
3326 "x%x x%x x%x\n",
3327 phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
3328 irsp->un.ulpWord[4], irsp->ulpTimeout);
3329
3330 lpfc_els_free_iocb(phba, cmdiocb);
3331 return;
3332}
3333
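[Editor's note] lpfc_ignore_els_cmpl() is introduced so that, while the driver or a vport is unloading, an outstanding ELS command is not aborted; its completion routine is simply swapped for one that logs and frees the iocb whenever the firmware eventually answers. A generic userspace sketch of defusing a completion callback in this way; names are invented.

#include <stdio.h>
#include <stdlib.h>

struct request;
typedef void (*cmpl_fn)(struct request *);

struct request {
	int     tag;
	cmpl_fn cmpl;	/* called when the command completes */
};

static void normal_cmpl(struct request *req)
{
	printf("tag %d: handle the ELS response\n", req->tag);
	free(req);
}

static void ignore_cmpl(struct request *req)
{
	/* Unload path: nothing left to act on, just release resources. */
	printf("tag %d: ignoring completion\n", req->tag);
	free(req);
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->tag = 7;
	req->cmpl = normal_cmpl;

	/* Driver is unloading: do not abort, just defuse the callback. */
	req->cmpl = ignore_cmpl;

	/* Later, the "firmware" completes the command. */
	req->cmpl(req);
	return 0;
}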
3262int 3334int
3263lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3335lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3264 struct lpfc_iocbq *cmdiocb) 3336 struct lpfc_iocbq *cmdiocb)
@@ -3269,22 +3341,30 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3269 IOCB_t *iabt = NULL; 3341 IOCB_t *iabt = NULL;
3270 int retval = IOCB_ERROR; 3342 int retval = IOCB_ERROR;
3271 3343
3272 /* There are certain command types we don't want 3344 /*
3273 * to abort. 3345 * There are certain command types we don't want to abort. And we
3346 * don't want to abort commands that are already in the process of
3347 * being aborted.
3274 */ 3348 */
3275 icmd = &cmdiocb->iocb; 3349 icmd = &cmdiocb->iocb;
3276 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 3350 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3277 icmd->ulpCommand == CMD_CLOSE_XRI_CN) 3351 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3352 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3278 return 0; 3353 return 0;
3279 3354
3280 /* If we're unloading, interrupts are disabled so we 3355 /* If we're unloading, don't abort the iocb, but change the callback so
3281 * need to cleanup the iocb here. 3356 * that nothing happens when it finishes.
3282 */ 3357 */
3283 if (vport->load_flag & FC_UNLOADING) 3358 if (vport->load_flag & FC_UNLOADING) {
3359 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3360 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3361 else
3362 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3284 goto abort_iotag_exit; 3363 goto abort_iotag_exit;
3364 }
3285 3365
3286 /* issue ABTS for this IOCB based on iotag */ 3366 /* issue ABTS for this IOCB based on iotag */
3287 abtsiocbp = lpfc_sli_get_iocbq(phba); 3367 abtsiocbp = __lpfc_sli_get_iocbq(phba);
3288 if (abtsiocbp == NULL) 3368 if (abtsiocbp == NULL)
3289 return 0; 3369 return 0;
3290 3370
@@ -3308,11 +3388,12 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3308 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 3388 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3309 3389
3310 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3390 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3311 "%d:0339 Abort xri x%x, original iotag x%x, abort " 3391 "%d (%d):0339 Abort xri x%x, original iotag x%x, "
3312 "cmd iotag x%x\n", 3392 "abort cmd iotag x%x\n",
3313 phba->brd_no, iabt->un.acxri.abortContextTag, 3393 phba->brd_no, vport->vpi,
3394 iabt->un.acxri.abortContextTag,
3314 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 3395 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3315 retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 3396 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3316 3397
3317abort_iotag_exit: 3398abort_iotag_exit:
3318 /* 3399 /*
@@ -3471,6 +3552,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3471 * lpfc_sli_issue_call since the wake routine sets a unique value and by 3552 * lpfc_sli_issue_call since the wake routine sets a unique value and by
3472 * definition this is a wait function. 3553 * definition this is a wait function.
3473 */ 3554 */
3555
3474int 3556int
3475lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 3557lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3476 struct lpfc_sli_ring *pring, 3558 struct lpfc_sli_ring *pring,
@@ -3558,9 +3640,8 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3558 int retval; 3640 int retval;
3559 3641
3560 /* The caller must leave context1 empty. */ 3642 /* The caller must leave context1 empty. */
3561 if (pmboxq->context1 != 0) { 3643 if (pmboxq->context1 != 0)
3562 return MBX_NOT_FINISHED; 3644 return MBX_NOT_FINISHED;
3563 }
3564 3645
3565 /* setup wake call as IOCB callback */ 3646 /* setup wake call as IOCB callback */
3566 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3647 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
@@ -3630,6 +3711,10 @@ lpfc_intr_handler(int irq, void *dev_id)
3630 int i; 3711 int i;
3631 uint32_t control; 3712 uint32_t control;
3632 3713
3714 MAILBOX_t *mbox, *pmbox;
3715 LPFC_MBOXQ_t *pmb;
3716 int rc;
3717
3633 /* 3718 /*
3634 * Get the driver's phba structure from the dev_id and 3719 * Get the driver's phba structure from the dev_id and
3635 * assume the HBA is not interrupting. 3720 * assume the HBA is not interrupting.
@@ -3729,10 +3814,71 @@ lpfc_intr_handler(int irq, void *dev_id)
3729 phba->pport->stopped = 1; 3814 phba->pport->stopped = 1;
3730 } 3815 }
3731 3816
3817 if ((work_ha_copy & HA_MBATT) &&
3818 (phba->sli.mbox_active)) {
3819 pmb = phba->sli.mbox_active;
3820 pmbox = &pmb->mb;
3821 mbox = &phba->slim2p->mbx;
3822
3823 /* First check out the status word */
3824 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3825 if (pmbox->mbxOwner != OWN_HOST) {
3826 /*
3827 * Stray Mailbox Interrupt, mbxCommand <cmd>
3828 * mbxStatus <status>
3829 */
3830 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3831 LOG_SLI,
3832 "%d (%d):0304 Stray Mailbox "
3833 "Interrupt mbxCommand x%x "
3834 "mbxStatus x%x\n",
3835 phba->brd_no,
3836 (pmb->vport
3837 ? pmb->vport->vpi
3838 : 0),
3839 pmbox->mbxCommand,
3840 pmbox->mbxStatus);
3841 }
3842 del_timer_sync(&phba->sli.mbox_tmo);
3843
3844 spin_lock(&phba->pport->work_port_lock);
3845 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3846 spin_unlock(&phba->pport->work_port_lock);
3847 phba->sli.mbox_active = NULL;
3848 if (pmb->mbox_cmpl) {
3849 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3850 MAILBOX_CMD_SIZE);
3851 }
3852 lpfc_mbox_cmpl_put(phba, pmb);
3853 }
3854 if ((work_ha_copy & HA_MBATT) &&
3855 (phba->sli.mbox_active == NULL)) {
3856send_next_mbox:
3857 spin_lock(&phba->hbalock);
3858 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3859 pmb = lpfc_mbox_get(phba);
3860 spin_unlock(&phba->hbalock);
3861
3862 /* Process next mailbox command if there is one */
3863 if (pmb != NULL) {
3864 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3865 if (rc == MBX_NOT_FINISHED) {
3866 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3867 lpfc_mbox_cmpl_put(phba, pmb);
3868 goto send_next_mbox;
3869 }
3870 } else {
3871 /* Turn on IOCB processing */
3872 for (i = 0; i < phba->sli.num_rings; i++)
3873 lpfc_sli_turn_on_ring(phba, i);
3874 }
3875
3876 }
3877
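[Editor's note] The interrupt handler now finishes the active mailbox itself on an HA_MBATT attention: it copies the status word back, warns if the ownership bit has not returned to the host (a stray interrupt), queues the command for its completion handler, and then tries to issue the next queued mailbox, marking a failed issue MBX_NOT_FINISHED and retrying via the send_next_mbox label. The compact sketch below captures that drain logic with a loop instead of the goto; all names and the fake queue are illustrative.

#include <stdio.h>

#define OWN_HOST 0

struct mbox {
	int owner;	/* who currently owns the mailbox area  */
	int status;	/* completion / MBX_NOT_FINISHED marker */
};

/* Pretend queue of pending commands. */
static struct mbox pending[2] = { { OWN_HOST, 0 }, { OWN_HOST, 0 } };
static int next_pending;

static struct mbox *mbox_get(void)
{
	return next_pending < 2 ? &pending[next_pending++] : NULL;
}

static int issue_mbox(struct mbox *m)
{
	/* Say the first queued command cannot be issued, to exercise
	 * the retry path. */
	return (m == &pending[0]) ? -1 : 0;
}

static void handle_mbatt(struct mbox *active)
{
	struct mbox *m;

	/* 1. Sanity-check ownership before trusting the completion. */
	if (active->owner != OWN_HOST)
		fprintf(stderr, "stray mailbox interrupt\n");
	active->status = 1;		/* hand to the cmpl handler */

	/* 2. Keep issuing queued commands; a failure is completed with
	 *    an error and we move on to the next one. */
	while ((m = mbox_get()) != NULL) {
		if (issue_mbox(m) == 0)
			break;		/* one command now in flight */
		m->status = -1;		/* MBX_NOT_FINISHED          */
	}
}

int main(void)
{
	struct mbox active = { OWN_HOST, 0 };

	handle_mbatt(&active);
	printf("active=%d p0=%d p1=%d\n",
	       active.status, pending[0].status, pending[1].status);
	return 0;
}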
3732 spin_lock(&phba->hbalock); 3878 spin_lock(&phba->hbalock);
3733 phba->work_ha |= work_ha_copy; 3879 phba->work_ha |= work_ha_copy;
3734 if (phba->work_wait) 3880 if (phba->work_wait)
3735 wake_up(phba->work_wait); 3881 lpfc_worker_wake_up(phba);
3736 spin_unlock(&phba->hbalock); 3882 spin_unlock(&phba->hbalock);
3737 } 3883 }
3738 3884
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 190d55a69b2a..4c43a8fd699c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -44,6 +44,7 @@ struct lpfc_iocbq {
44#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 44#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
45#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 45#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
46#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 46#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
47#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
47 48
48 uint8_t abort_count; 49 uint8_t abort_count;
49 uint8_t rsvd2; 50 uint8_t rsvd2;
@@ -58,6 +59,8 @@ struct lpfc_iocbq {
58 struct lpfcMboxq *mbox; 59 struct lpfcMboxq *mbox;
59 } context_un; 60 } context_un;
60 61
62 void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
63 struct lpfc_iocbq *);
61 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 64 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
62 struct lpfc_iocbq *); 65 struct lpfc_iocbq *);
63 66
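[Editor's note] The iocb gains an LPFC_IO_FABRIC flag and a second completion pointer, fabric_iocb_cmpl, used by the fabric I/O handling added in lpfc_els.c; the abort path earlier in this patch picks which of the two callbacks to defuse based on the flag. Without asserting exactly how lpfc_els.c wires the two pointers, the sketch below shows the general shape of parking the issuer's completion while a scheduler completion is interposed; everything here is an illustrative stand-in.

#include <stdio.h>

struct iocb;
typedef void (*cmpl_fn)(struct iocb *);

#define IO_FABRIC 0x10		/* request is managed by the scheduler */

struct iocb {
	unsigned flags;
	cmpl_fn  iocb_cmpl;		/* what will run on completion        */
	cmpl_fn  fabric_iocb_cmpl;	/* the issuer's routine, parked while
					 * the scheduler's is interposed      */
};

static void issuer_cmpl(struct iocb *io)
{
	(void)io;
	printf("issuer sees the completion\n");
}

static void scheduler_cmpl(struct iocb *io)
{
	printf("scheduler bookkeeping first...\n");
	io->iocb_cmpl = io->fabric_iocb_cmpl;	/* restore the original */
	io->iocb_cmpl(io);			/* then pass it through */
}

int main(void)
{
	struct iocb io = { .flags = 0, .iocb_cmpl = issuer_cmpl };

	/* Scheduler takes over the request before it is issued. */
	io.flags |= IO_FABRIC;
	io.fabric_iocb_cmpl = io.iocb_cmpl;
	io.iocb_cmpl = scheduler_cmpl;

	/* Firmware completes it: the scheduler runs, then the issuer. */
	io.iocb_cmpl(&io);
	return 0;
}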
@@ -173,7 +176,7 @@ struct lpfc_sli_ring {
173/* Structure used for configuring rings to a specific profile or rctl / type */ 176/* Structure used for configuring rings to a specific profile or rctl / type */
174struct lpfc_hbq_init { 177struct lpfc_hbq_init {
175 uint32_t rn; /* Receive buffer notification */ 178 uint32_t rn; /* Receive buffer notification */
176 uint32_t entry_count; /* # of entries in HBQ */ 179 uint32_t entry_count; /* max # of entries in HBQ */
177 uint32_t headerLen; /* 0 if not profile 4 or 5 */ 180 uint32_t headerLen; /* 0 if not profile 4 or 5 */
178 uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */ 181 uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */
179 uint32_t profile; /* Selection profile 0=all, 7=logentry */ 182 uint32_t profile; /* Selection profile 0=all, 7=logentry */
@@ -188,6 +191,11 @@ struct lpfc_hbq_init {
188 uint32_t cmdmatch[8]; 191 uint32_t cmdmatch[8];
189 uint32_t mask_count; /* number of mask entries in prt array */ 192 uint32_t mask_count; /* number of mask entries in prt array */
190 struct hbq_mask hbqMasks[6]; 193 struct hbq_mask hbqMasks[6];
194
195 /* Non-config rings fields to keep track of buffer allocations */
196 uint32_t buffer_count; /* number of buffers allocated */
197 uint32_t init_count; /* number to allocate when initialized */
198 uint32_t add_count; /* number to allocate when starved */
191} ; 199} ;
192 200
193#define LPFC_MAX_HBQ 16 201#define LPFC_MAX_HBQ 16
@@ -238,6 +246,7 @@ struct lpfc_sli {
238 uint16_t mboxq_cnt; /* current length of queue */ 246 uint16_t mboxq_cnt; /* current length of queue */
239 uint16_t mboxq_max; /* max length */ 247 uint16_t mboxq_max; /* max length */
240 LPFC_MBOXQ_t *mbox_active; /* active mboxq information */ 248 LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
249 struct list_head mboxq_cmpl;
241 250
242 struct timer_list mbox_tmo; /* Hold clk to timeout active mbox 251 struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
243 cmd */ 252 cmd */
@@ -250,12 +259,6 @@ struct lpfc_sli {
250 struct lpfc_lnk_stat lnk_stat_offsets; 259 struct lpfc_lnk_stat lnk_stat_offsets;
251}; 260};
252 261
253/* Given a pointer to the start of the ring, and the slot number of
254 * the desired iocb entry, calc a pointer to that entry.
255 * (assume iocb entry size is 32 bytes, or 8 words)
256 */
257#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
258
259#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 262#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
260 command */ 263 command */
261#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 264#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
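The mboxq_cmpl list added to struct lpfc_sli above backs the lpfc_mbox_cmpl_put() calls in the interrupt-handler changes earlier in this patch: completed mailboxes are parked on the list so the worker thread can run their mbox_cmpl handlers out of interrupt context. The helper's body is not shown in this hunk; the sketch below is inferred from the list it manipulates and may differ from the actual definition elsewhere in the patch.

/* Illustrative only -- the real helper is defined elsewhere in the patch. */
void lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	unsigned long iflag;

	/* Queue the finished mailbox on the completion list so the worker
	 * thread can invoke mbq->mbox_cmpl outside interrupt context. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}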
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index fd10fa16980e..7aff29bac0de 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.12_sli3" 21#define LPFC_DRIVER_VERSION "8.2.0"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 000000000000..786125b7ad4c
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,508 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_version.h"
43#include "lpfc_vport.h"
44
45inline void lpfc_vport_set_state(struct lpfc_vport *vport,
46 enum fc_vport_state new_state)
47{
48 struct fc_vport *fc_vport = vport->fc_vport;
49
50 if (fc_vport) {
51 /*
52 * When the transport defines fc_vport_set_state() we will replace
53 * this code with the following line
54 */
55 /* fc_vport_set_state(fc_vport, new_state); */
56 if (new_state != FC_VPORT_INITIALIZING)
57 fc_vport->vport_last_state = fc_vport->vport_state;
58 fc_vport->vport_state = new_state;
59 }
60
61 /* For all the error states we will set the internal state to FAILED */
62 switch (new_state) {
63 case FC_VPORT_NO_FABRIC_SUPP:
64 case FC_VPORT_NO_FABRIC_RSCS:
65 case FC_VPORT_FABRIC_LOGOUT:
66 case FC_VPORT_FABRIC_REJ_WWN:
67 case FC_VPORT_FAILED:
68 vport->port_state = LPFC_VPORT_FAILED;
69 break;
70 case FC_VPORT_LINKDOWN:
71 vport->port_state = LPFC_VPORT_UNKNOWN;
72 break;
73 default:
74 /* do nothing */
75 break;
76 }
77}
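lpfc_vport_set_state() keeps the transport-visible fc_vport state and the driver's internal port_state in step. A hedged usage fragment follows; fdisc_failed is a placeholder for the FDISC completion status that the discovery code checks elsewhere in this patch.

/* Hedged usage fragment: fdisc_failed stands in for the FDISC completion
 * status checked by the discovery code. */
if (fdisc_failed)
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);  /* also forces
							  vport->port_state to
							  LPFC_VPORT_FAILED */
else
	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);  /* internal state
							  left untouched */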
78
79static int
80lpfc_alloc_vpi(struct lpfc_hba *phba)
81{
82 int vpi;
83
84 spin_lock_irq(&phba->hbalock);
85 vpi = find_next_zero_bit(phba->vpi_bmask, phba->max_vpi, 1);
86 if (vpi > phba->max_vpi)
87 vpi = 0;
88 else
89 set_bit(vpi, phba->vpi_bmask);
90 spin_unlock_irq(&phba->hbalock);
91 return vpi;
92}
93
94static void
95lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
96{
97 spin_lock_irq(&phba->hbalock);
98 clear_bit(vpi, phba->vpi_bmask);
99 spin_unlock_irq(&phba->hbalock);
100}
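The two helpers above implement the vpi policy: bit 0 of vpi_bmask belongs to the physical port, so the search starts at bit 1 and a return value of 0 means the HBA's vpi range is exhausted. A self-contained sketch of the same policy, with a plain array standing in for the kernel bitmap and MAX_VPI for phba->max_vpi:

/* Standalone sketch of the vpi policy above: slot 0 is the physical port,
 * allocation starts at 1, and a return of 0 means "exhausted". */
#include <stdio.h>

#define MAX_VPI 8
static unsigned char vpi_bmask[MAX_VPI] = { 1 };   /* bit 0 always taken */

static int alloc_vpi(void)
{
	for (int vpi = 1; vpi < MAX_VPI; vpi++)
		if (!vpi_bmask[vpi]) {
			vpi_bmask[vpi] = 1;
			return vpi;
		}
	return 0;                               /* no free vpi */
}

static void free_vpi(int vpi)
{
	if (vpi > 0 && vpi < MAX_VPI)
		vpi_bmask[vpi] = 0;
}

int main(void)
{
	int a = alloc_vpi(), b = alloc_vpi();

	printf("allocated vpi %d and %d\n", a, b);    /* 1 and 2 */
	free_vpi(a);
	printf("reallocated vpi %d\n", alloc_vpi());  /* 1 again */
	return 0;
}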
101
102static int
103lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
104{
105 LPFC_MBOXQ_t *pmb;
106 MAILBOX_t *mb;
107 struct lpfc_dmabuf *mp;
108 int rc;
109
110 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
111 if (!pmb) {
112 return -ENOMEM;
113 }
114 mb = &pmb->mb;
115
116 lpfc_read_sparam(phba, pmb, vport->vpi);
117 /*
118 * Grab buffer pointer and clear context1 so we can use
119 * lpfc_sli_issue_mbox_wait()
120 */
121 mp = (struct lpfc_dmabuf *) pmb->context1;
122 pmb->context1 = NULL;
123
124 pmb->vport = vport;
125 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
126 if (rc != MBX_SUCCESS) {
127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
128 "%d (%d):1818 VPort failed init, mbxCmd x%x "
129 "READ_SPARM mbxStatus x%x, rc = x%x\n",
130 phba->brd_no, vport->vpi,
131 mb->mbxCommand, mb->mbxStatus, rc);
132 lpfc_mbuf_free(phba, mp->virt, mp->phys);
133 kfree(mp);
134 mempool_free(pmb, phba->mbox_mem_pool);
135 return -EIO;
136 }
137
138 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
139 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
140 sizeof (struct lpfc_name));
141 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
142 sizeof (struct lpfc_name));
143
144 lpfc_mbuf_free(phba, mp->virt, mp->phys);
145 kfree(mp);
146 mempool_free(pmb, phba->mbox_mem_pool);
147
148 return 0;
149}
150
151static int
152lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
153 const char *name_type)
154{
155 /* ensure that IEEE format 1 addresses
156 * contain zeros in bits 59-48
157 */
158 if (!((wwn->u.wwn[0] >> 4) == 1 &&
159 ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
160 return 1;
161
162 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
163 "%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
164 "%02x:%02x:%02x:%02x\n",
165 phba->brd_no, name_type,
166 wwn->u.wwn[0], wwn->u.wwn[1],
167 wwn->u.wwn[2], wwn->u.wwn[3],
168 wwn->u.wwn[4], wwn->u.wwn[5],
169 wwn->u.wwn[6], wwn->u.wwn[7]);
170 return 0;
171}
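lpfc_valid_wwn_format() only rejects NAA format 1 (IEEE) names whose reserved bits 59-48 are non-zero: for example 10:00:00:00:c9:3c:9a:5b passes, while 1f:f0:00:00:c9:3c:9a:5b is rejected. A standalone check mirroring the same expression (the sample WWNs are illustrative):

/* Standalone check mirroring the expression above: reject only NAA-1
 * (IEEE) names whose reserved bits 59-48 are non-zero. */
#include <stdio.h>
#include <stdint.h>

static int wwn_format_ok(const uint8_t wwn[8])
{
	return !(((wwn[0] >> 4) == 1) &&
		 (((wwn[0] & 0xf) != 0) || ((wwn[1] & 0xf) != 0)));
}

int main(void)
{
	uint8_t good[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x3c, 0x9a, 0x5b };
	uint8_t bad[8]  = { 0x1f, 0xf0, 0x00, 0x00, 0xc9, 0x3c, 0x9a, 0x5b };

	printf("good: %d, bad: %d\n", wwn_format_ok(good), wwn_format_ok(bad));
	return 0;      /* prints "good: 1, bad: 0" */
}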
172
173static int
174lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
175{
176 struct lpfc_vport *vport;
177
178 list_for_each_entry(vport, &phba->port_list, listentry) {
179 if (vport == new_vport)
180 continue;
181 /* If they match, return not unique */
182 if (memcmp(&vport->fc_sparam.portName,
183 &new_vport->fc_sparam.portName,
184 sizeof(struct lpfc_name)) == 0)
185 return 0;
186 }
187 return 1;
188}
189
190int
191lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
192{
193 struct lpfc_nodelist *ndlp;
194 struct lpfc_vport *pport =
195 (struct lpfc_vport *) fc_vport->shost->hostdata;
196 struct lpfc_hba *phba = pport->phba;
197 struct lpfc_vport *vport = NULL;
198 int instance;
199 int vpi;
200 int rc = VPORT_ERROR;
201
202 if ((phba->sli_rev < 3) ||
203 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
204 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
205 "%d:1808 Create VPORT failed: "
206 "NPIV is not enabled: SLImode:%d\n",
207 phba->brd_no, phba->sli_rev);
208 rc = VPORT_INVAL;
209 goto error_out;
210 }
211
212 vpi = lpfc_alloc_vpi(phba);
213 if (vpi == 0) {
214 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
215 "%d:1809 Create VPORT failed: "
216 "Max VPORTs (%d) exceeded\n",
217 phba->brd_no, phba->max_vpi);
218 rc = VPORT_NORESOURCES;
219 goto error_out;
220 }
221
222
223 /* Assign an unused board number */
224 if ((instance = lpfc_get_instance()) < 0) {
225 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
226 "%d:1810 Create VPORT failed: Cannot get "
227 "instance number\n", phba->brd_no);
228 lpfc_free_vpi(phba, vpi);
229 rc = VPORT_NORESOURCES;
230 goto error_out;
231 }
232
233 vport = lpfc_create_port(phba, instance, fc_vport);
234 if (!vport) {
235 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
236 "%d:1811 Create VPORT failed: vpi x%x\n",
237 phba->brd_no, vpi);
238 lpfc_free_vpi(phba, vpi);
239 rc = VPORT_NORESOURCES;
240 goto error_out;
241 }
242
243 vport->vpi = vpi;
244 if (lpfc_vport_sparm(phba, vport)) {
245 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
246 "%d:1813 Create VPORT failed: vpi:%d "
247 "Cannot get sparam\n",
248 phba->brd_no, vpi);
249 lpfc_free_vpi(phba, vpi);
250 destroy_port(vport);
251 rc = VPORT_NORESOURCES;
252 goto error_out;
253 }
254
255 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
256 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
257
258 if (fc_vport->node_name != 0)
259 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
260 if (fc_vport->port_name != 0)
261 u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
262
263 memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
264 memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
265
266 if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
267 !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
268 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
269 "%d:1821 Create VPORT failed: vpi:%d "
270 "Invalid WWN format\n",
271 phba->brd_no, vpi);
272 lpfc_free_vpi(phba, vpi);
273 destroy_port(vport);
274 rc = VPORT_INVAL;
275 goto error_out;
276 }
277
278 if (!lpfc_unique_wwpn(phba, vport)) {
279 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
280 "%d:1823 Create VPORT failed: vpi:%d "
281 "Duplicate WWN on HBA\n",
282 phba->brd_no, vpi);
283 lpfc_free_vpi(phba, vpi);
284 destroy_port(vport);
285 rc = VPORT_INVAL;
286 goto error_out;
287 }
288
289 *(struct lpfc_vport **)fc_vport->dd_data = vport;
290 vport->fc_vport = fc_vport;
291
292 if ((phba->link_state < LPFC_LINK_UP) ||
293 (phba->fc_topology == TOPOLOGY_LOOP)) {
294 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
295 rc = VPORT_OK;
296 goto out;
297 }
298
299 if (disable) {
300 rc = VPORT_OK;
301 goto out;
302 }
303
304 /* Use the physical port's Fabric NDLP to determine if the link is
305 * up and ready for FDISC.
306 */
307 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
308 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
309 lpfc_set_disctmo(vport);
310 lpfc_initial_fdisc(vport);
311 } else {
312 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
313 }
314 rc = VPORT_OK;
315
316out:
317 lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
318error_out:
319 return rc;
320}
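lpfc_vport_create() is not called directly from user space; the FC transport reaches it through the vport hooks the driver registers in its struct fc_function_template (that registration is in lpfc_attr.c, outside this hunk). A hedged sketch of the wiring, assuming the transport's vport_create/vport_disable/vport_delete fields:

/* Sketch of the expected registration; the real template in lpfc_attr.c
 * carries many more handlers. */
static struct fc_function_template lpfc_transport_functions = {
	/* ... existing host/rport attribute handlers ... */
	.vport_create  = lpfc_vport_create,   /* fc_vport allocated by transport */
	.vport_disable = lpfc_vport_disable,
	.vport_delete  = lpfc_vport_delete,
};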
321
322int
323disable_vport(struct fc_vport *fc_vport)
324{
325 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
326 struct lpfc_hba *phba = vport->phba;
327 struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
328 long timeout;
329
330 ndlp = lpfc_findnode_did(vport, Fabric_DID);
331 if (ndlp && phba->link_state >= LPFC_LINK_UP) {
332 vport->unreg_vpi_cmpl = VPORT_INVAL;
333 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
334 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
335 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
336 timeout = schedule_timeout(timeout);
337 }
338
339 lpfc_sli_host_down(vport);
340
341 /* Mark all nodes for discovery so we can remove them by
342 * calling lpfc_cleanup_rpis(vport, 1)
343 */
344 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
345 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
346 continue;
347 lpfc_disc_state_machine(vport, ndlp, NULL,
348 NLP_EVT_DEVICE_RECOVERY);
349 }
350 lpfc_cleanup_rpis(vport, 1);
351
352 lpfc_stop_vport_timers(vport);
353 lpfc_unreg_all_rpis(vport);
354 lpfc_unreg_default_rpis(vport);
355 /*
356 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
357 * scsi_host_put() to release the vport.
358 */
359 lpfc_mbx_unreg_vpi(vport);
360
361 lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
362 return VPORT_OK;
363}
364
365int
366enable_vport(struct fc_vport *fc_vport)
367{
368 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
369 struct lpfc_hba *phba = vport->phba;
370 struct lpfc_nodelist *ndlp = NULL;
371
372 if ((phba->link_state < LPFC_LINK_UP) ||
373 (phba->fc_topology == TOPOLOGY_LOOP)) {
374 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
375 return VPORT_OK;
376 }
377
378 vport->load_flag |= FC_LOADING;
379 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
380
381 /* Use the physical port's Fabric NDLP to determine if the link is
382 * up and ready for FDISC.
383 */
384 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
385 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
386 lpfc_set_disctmo(vport);
387 lpfc_initial_fdisc(vport);
388 } else {
389 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
390 }
391
392 return VPORT_OK;
393}
394
395int
396lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
397{
398 if (disable)
399 return disable_vport(fc_vport);
400 else
401 return enable_vport(fc_vport);
402}
403
404
405int
406lpfc_vport_delete(struct fc_vport *fc_vport)
407{
408 struct lpfc_nodelist *ndlp = NULL;
409 struct lpfc_nodelist *next_ndlp;
410 struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
411 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
412 struct lpfc_hba *phba = vport->phba;
413 long timeout;
414 int rc = VPORT_ERROR;
415
416 /*
417 * This is a bit of a mess. We want to ensure the shost doesn't get
418 * torn down until we're done with the embedded lpfc_vport structure.
419 *
420 * Beyond holding a reference for this function, we also need a
421 * reference for outstanding I/O requests we schedule during delete
422 * processing. But once we scsi_remove_host() we can no longer obtain
423 * a reference through scsi_host_get().
424 *
425 * So we take two references here. We release one reference at the
426 * bottom of the function -- after delinking the vport. And we
427 * release the other at the completion of the unreg_vpi that gets
428 * initiated after we've disposed of all other resources associated
429 * with the port.
430 */
431 if (!scsi_host_get(shost) || !scsi_host_get(shost))
432 return VPORT_INVAL;
433
434 if (vport->port_type == LPFC_PHYSICAL_PORT) {
435 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
436 "%d:1812 vport_delete failed: Cannot delete "
437 "physical host\n", phba->brd_no);
438 goto out;
439 }
440
441 vport->load_flag |= FC_UNLOADING;
442
443 kfree(vport->vname);
444 fc_remove_host(lpfc_shost_from_vport(vport));
445 scsi_remove_host(lpfc_shost_from_vport(vport));
446
447 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
448 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
449 phba->link_state >= LPFC_LINK_UP) {
450
451 /* First look for the Fabric ndlp */
452 ndlp = lpfc_findnode_did(vport, Fabric_DID);
453 if (!ndlp) {
454 /* Cannot find existing Fabric ndlp, allocate one */
455 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
456 if (!ndlp)
457 goto skip_logo;
458 lpfc_nlp_init(vport, ndlp, Fabric_DID);
459 } else {
460 lpfc_dequeue_node(vport, ndlp);
461 }
462 vport->unreg_vpi_cmpl = VPORT_INVAL;
463 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
464 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
465 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
466 timeout = schedule_timeout(timeout);
467 }
468
469skip_logo:
470 lpfc_sli_host_down(vport);
471
472 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
473 lpfc_disc_state_machine(vport, ndlp, NULL,
474 NLP_EVT_DEVICE_RECOVERY);
475 lpfc_disc_state_machine(vport, ndlp, NULL,
476 NLP_EVT_DEVICE_RM);
477 }
478
479 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
480 /* free any ndlp's in unused state */
481 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
482 lpfc_drop_node(vport, ndlp);
483 }
484
485 lpfc_stop_vport_timers(vport);
486 lpfc_unreg_all_rpis(vport);
487 lpfc_unreg_default_rpis(vport);
488 /*
489 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
490 * scsi_host_put() to release the vport.
491 */
492 lpfc_mbx_unreg_vpi(vport);
493
494 lpfc_free_vpi(phba, vport->vpi);
495 vport->work_port_events = 0;
496 spin_lock_irq(&phba->hbalock);
497 list_del_init(&vport->listentry);
498 spin_unlock_irq(&phba->hbalock);
499
500 rc = VPORT_OK;
501out:
502 scsi_host_put(shost);
503 return rc;
504}
505
506
507EXPORT_SYMBOL(lpfc_vport_create);
508EXPORT_SYMBOL(lpfc_vport_delete);
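The double scsi_host_get() at the top of lpfc_vport_delete() is the lifetime trick this file depends on: one reference covers the delete routine itself, the other is dropped only when the UNREG_VPI mailbox completes. A small standalone illustration of that two-reference scheme (generic refcount, not the scsi_host API; the ordering of the final drops is illustrative):

/* Standalone illustration of the two-reference scheme described above. */
#include <stdio.h>

struct obj { int refs; };

static void get(struct obj *o) { o->refs++; }
static void put(struct obj *o)
{
	if (--o->refs == 0)
		printf("object freed\n");
}

static void unreg_vpi_completion(struct obj *o)   /* runs "later" */
{
	put(o);                      /* second reference released here */
}

int main(void)
{
	struct obj shost = { .refs = 1 };    /* creator's reference */

	get(&shost); get(&shost);            /* the two refs taken by delete */
	/* ... tear down resources, queue unreg_vpi ... */
	put(&shost);                         /* end of the delete routine */
	put(&shost);                         /* creator drops its own ref */
	unreg_vpi_completion(&shost);        /* async completion frees it */
	return 0;
}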
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 000000000000..f223550f8cba
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,113 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#ifndef _H_LPFC_VPORT
23#define _H_LPFC_VPORT
24
25/* API version values (each will be an individual bit) */
26#define VPORT_API_VERSION_1 0x01
27
28/* Values returned via lpfc_vport_getinfo() */
29struct vport_info {
30
31 uint32_t api_versions;
32 uint8_t linktype;
33#define VPORT_TYPE_PHYSICAL 0
34#define VPORT_TYPE_VIRTUAL 1
35
36 uint8_t state;
37#define VPORT_STATE_OFFLINE 0
38#define VPORT_STATE_ACTIVE 1
39#define VPORT_STATE_FAILED 2
40
41 uint8_t fail_reason;
42 uint8_t prev_fail_reason;
43#define VPORT_FAIL_UNKNOWN 0
44#define VPORT_FAIL_LINKDOWN 1
45#define VPORT_FAIL_FAB_UNSUPPORTED 2
46#define VPORT_FAIL_FAB_NORESOURCES 3
47#define VPORT_FAIL_FAB_LOGOUT 4
48#define VPORT_FAIL_ADAP_NORESOURCES 5
49
50 uint8_t node_name[8]; /* WWNN */
51 uint8_t port_name[8]; /* WWPN */
52
53 struct Scsi_Host *shost;
54
55/* Following values are valid only on physical links */
56 uint32_t vports_max;
57 uint32_t vports_inuse;
58 uint32_t rpi_max;
59 uint32_t rpi_inuse;
60#define VPORT_CNT_INVALID 0xFFFFFFFF
61};
62
63/* data used in link creation */
64struct vport_data {
65 uint32_t api_version;
66
67 uint32_t options;
68#define VPORT_OPT_AUTORETRY 0x01
69
70 uint8_t node_name[8]; /* WWNN */
71 uint8_t port_name[8]; /* WWPN */
72
73/*
74 * Upon successful creation, vport_shost will point to the new Scsi_Host
75 * structure for the new virtual link.
76 */
77 struct Scsi_Host *vport_shost;
78};
79
80/* API function return codes */
81#define VPORT_OK 0
82#define VPORT_ERROR -1
83#define VPORT_INVAL -2
84#define VPORT_NOMEM -3
85#define VPORT_NORESOURCES -4
86
87int lpfc_vport_create(struct fc_vport *, bool);
88int lpfc_vport_delete(struct fc_vport *);
89int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
91
92/*
93 * queuecommand VPORT-specific return codes. Specified in the host byte code.
94 * Returned when the virtual link has failed or is not active.
95 */
96#define DID_VPORT_ERROR 0x0f
97
98#define VPORT_INFO 0x1
99#define VPORT_CREATE 0x2
100#define VPORT_DELETE 0x4
101
102struct vport_cmd_tag {
103 uint32_t cmd;
104 struct vport_data cdata;
105 struct vport_info cinfo;
106 void *vport;
107 int vport_num;
108};
109
110void lpfc_vport_set_state(struct lpfc_vport *vport,
111 enum fc_vport_state new_state);
112
113#endif /* _H_LPFC_VPORT */
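The vport_info/vport_data structures above describe the management view of a virtual port. Below is a hedged sketch of a caller preparing a creation request with them; issue_vport_create() and the WWN arguments are placeholders, the real entry points are the lpfc_vport_* functions declared above, and lpfc_vport.h plus the usual kernel headers are assumed to be in scope.

/* Hedged sketch: filling the creation data defined above. */
static int example_prepare_create(struct Scsi_Host *phys_shost,
				  const uint8_t wwnn[8],
				  const uint8_t wwpn[8])
{
	struct vport_data data;

	memset(&data, 0, sizeof(data));
	data.api_version = VPORT_API_VERSION_1;
	data.options = VPORT_OPT_AUTORETRY;
	memcpy(data.node_name, wwnn, sizeof(data.node_name));
	memcpy(data.port_name, wwpn, sizeof(data.port_name));

	/* VPORT_OK means data.vport_shost now points at the new virtual
	 * link's Scsi_Host; any other VPORT_* value is a failure code.  */
	return issue_vport_create(phys_shost, &data);    /* placeholder */
}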